4 * Copyright (c) 2003-2008 Fabrice Bellard
5 * Copyright (c) 2011-2015 Red Hat Inc
8 * Juan Quintela <quintela@redhat.com>
10 * Permission is hereby granted, free of charge, to any person obtaining a copy
11 * of this software and associated documentation files (the "Software"), to deal
12 * in the Software without restriction, including without limitation the rights
13 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the Software is
15 * furnished to do so, subject to the following conditions:
17 * The above copyright notice and this permission notice shall be included in
18 * all copies or substantial portions of the Software.
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
29 #include "qemu/osdep.h"
30 #include "qemu/cutils.h"
31 #include "qemu/bitops.h"
32 #include "qemu/bitmap.h"
33 #include "qemu/madvise.h"
34 #include "qemu/main-loop.h"
36 #include "ram-compress.h"
38 #include "migration.h"
39 #include "migration-stats.h"
40 #include "migration/register.h"
41 #include "migration/misc.h"
42 #include "qemu-file.h"
43 #include "postcopy-ram.h"
44 #include "page_cache.h"
45 #include "qemu/error-report.h"
46 #include "qapi/error.h"
47 #include "qapi/qapi-types-migration.h"
48 #include "qapi/qapi-events-migration.h"
49 #include "qapi/qapi-commands-migration.h"
50 #include "qapi/qmp/qerror.h"
52 #include "exec/ram_addr.h"
53 #include "exec/target_page.h"
54 #include "qemu/rcu_queue.h"
55 #include "migration/colo.h"
57 #include "sysemu/cpu-throttle.h"
61 #include "sysemu/runstate.h"
64 #include "sysemu/dirtylimit.h"
65 #include "sysemu/kvm.h"
67 #include "hw/boards.h" /* for machine_dump_guest_core() */
69 #if defined(__linux__)
70 #include "qemu/userfaultfd.h"
71 #endif /* defined(__linux__) */
73 /***********************************************************/
74 /* ram save/restore */
77 * RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS, it
78 * worked for pages that were filled with the same char. We switched
79 * it to only search for the zero value. And to avoid confusion with
80 * RAM_SAVE_FLAG_COMPRESS_PAGE just rename it.
83 * RAM_SAVE_FLAG_FULL was obsoleted in 2009, it can be reused now
85 #define RAM_SAVE_FLAG_FULL 0x01
86 #define RAM_SAVE_FLAG_ZERO 0x02
87 #define RAM_SAVE_FLAG_MEM_SIZE 0x04
88 #define RAM_SAVE_FLAG_PAGE 0x08
89 #define RAM_SAVE_FLAG_EOS 0x10
90 #define RAM_SAVE_FLAG_CONTINUE 0x20
91 #define RAM_SAVE_FLAG_XBZRLE 0x40
92 /* 0x80 is reserved in rdma.h for RAM_SAVE_FLAG_HOOK */
93 #define RAM_SAVE_FLAG_COMPRESS_PAGE 0x100
94 #define RAM_SAVE_FLAG_MULTIFD_FLUSH 0x200
95 /* We can't use any flag that is bigger than 0x200 */
98 * mapped-ram migration supports O_DIRECT, so we need to make sure the
99 * userspace buffer, the IO operation size and the file offset are
100 * aligned according to the underlying device's block size. The first
101 * two are already aligned to page size, but we need to add padding to
102 * the file to align the offset. We cannot read the block size
103 * dynamically because the migration file can be moved between
104 * different systems, so use 1M to cover most block sizes and to keep
105 * the file offset aligned at page size as well.
107 #define MAPPED_RAM_FILE_OFFSET_ALIGNMENT 0x100000
110 * When doing mapped-ram migration, this is the amount we read from
111 * the pages region in the migration file at a time.
113 #define MAPPED_RAM_LOAD_BUF_SIZE 0x100000
115 XBZRLECacheStats xbzrle_counters
;
117 /* used by the search for pages to send */
118 struct PageSearchStatus
{
119 /* The migration channel used for a specific host page */
120 QEMUFile
*pss_channel
;
121 /* Last block from where we have sent data */
122 RAMBlock
*last_sent_block
;
123 /* Current block being searched */
125 /* Current page to search from */
127 /* Set once we wrap around */
129 /* Whether we're sending a host page */
130 bool host_page_sending
;
131 /* The start/end of current host page. Invalid if host_page_sending==false */
132 unsigned long host_page_start
;
133 unsigned long host_page_end
;
135 typedef struct PageSearchStatus PageSearchStatus
;
137 /* struct contains XBZRLE cache and a static page
138 used by the compression */
140 /* buffer used for XBZRLE encoding */
141 uint8_t *encoded_buf
;
142 /* buffer for storing page content */
143 uint8_t *current_buf
;
144 /* Cache for XBZRLE, Protected by lock. */
147 /* it will store a page full of zeros */
148 uint8_t *zero_target_page
;
149 /* buffer used for XBZRLE decoding */
150 uint8_t *decoded_buf
;
153 static void XBZRLE_cache_lock(void)
155 if (migrate_xbzrle()) {
156 qemu_mutex_lock(&XBZRLE
.lock
);
160 static void XBZRLE_cache_unlock(void)
162 if (migrate_xbzrle()) {
163 qemu_mutex_unlock(&XBZRLE
.lock
);
168 * xbzrle_cache_resize: resize the xbzrle cache
170 * This function is called from migrate_params_apply in main
171 * thread, possibly while a migration is in progress. A running
172 * migration may be using the cache and might finish during this call,
173 * hence changes to the cache are protected by XBZRLE.lock().
175 * Returns 0 for success or -1 for error
177 * @new_size: new cache size
178 * @errp: set *errp if the check failed, with reason
180 int xbzrle_cache_resize(uint64_t new_size
, Error
**errp
)
182 PageCache
*new_cache
;
185 /* Check for truncation */
186 if (new_size
!= (size_t)new_size
) {
187 error_setg(errp
, QERR_INVALID_PARAMETER_VALUE
, "cache size",
188 "exceeding address space");
192 if (new_size
== migrate_xbzrle_cache_size()) {
199 if (XBZRLE
.cache
!= NULL
) {
200 new_cache
= cache_init(new_size
, TARGET_PAGE_SIZE
, errp
);
206 cache_fini(XBZRLE
.cache
);
207 XBZRLE
.cache
= new_cache
;
210 XBZRLE_cache_unlock();
214 static bool postcopy_preempt_active(void)
216 return migrate_postcopy_preempt() && migration_in_postcopy();
219 bool migrate_ram_is_ignored(RAMBlock
*block
)
221 return !qemu_ram_is_migratable(block
) ||
222 (migrate_ignore_shared() && qemu_ram_is_shared(block
)
223 && qemu_ram_is_named_file(block
));
226 #undef RAMBLOCK_FOREACH
228 int foreach_not_ignored_block(RAMBlockIterFunc func
, void *opaque
)
233 RCU_READ_LOCK_GUARD();
235 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
236 ret
= func(block
, opaque
);
244 static void ramblock_recv_map_init(void)
248 RAMBLOCK_FOREACH_NOT_IGNORED(rb
) {
249 assert(!rb
->receivedmap
);
250 rb
->receivedmap
= bitmap_new(rb
->max_length
>> qemu_target_page_bits());
254 int ramblock_recv_bitmap_test(RAMBlock
*rb
, void *host_addr
)
256 return test_bit(ramblock_recv_bitmap_offset(host_addr
, rb
),
260 bool ramblock_recv_bitmap_test_byte_offset(RAMBlock
*rb
, uint64_t byte_offset
)
262 return test_bit(byte_offset
>> TARGET_PAGE_BITS
, rb
->receivedmap
);
265 void ramblock_recv_bitmap_set(RAMBlock
*rb
, void *host_addr
)
267 set_bit_atomic(ramblock_recv_bitmap_offset(host_addr
, rb
), rb
->receivedmap
);
270 void ramblock_recv_bitmap_set_range(RAMBlock
*rb
, void *host_addr
,
273 bitmap_set_atomic(rb
->receivedmap
,
274 ramblock_recv_bitmap_offset(host_addr
, rb
),
278 #define RAMBLOCK_RECV_BITMAP_ENDING (0x0123456789abcdefULL)
281 * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
283 * Returns >0 if success with sent bytes, or <0 if error.
285 int64_t ramblock_recv_bitmap_send(QEMUFile
*file
,
286 const char *block_name
)
288 RAMBlock
*block
= qemu_ram_block_by_name(block_name
);
289 unsigned long *le_bitmap
, nbits
;
293 error_report("%s: invalid block name: %s", __func__
, block_name
);
297 nbits
= block
->postcopy_length
>> TARGET_PAGE_BITS
;
300 * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
301 * machines we may need 4 more bytes for padding (see below
302 * comment). So extend it a bit before hand.
304 le_bitmap
= bitmap_new(nbits
+ BITS_PER_LONG
);
307 * Always use little endian when sending the bitmap. This is
308 * required that when source and destination VMs are not using the
309 * same endianness. (Note: big endian won't work.)
311 bitmap_to_le(le_bitmap
, block
->receivedmap
, nbits
);
313 /* Size of the bitmap, in bytes */
314 size
= DIV_ROUND_UP(nbits
, 8);
317 * size is always aligned to 8 bytes for 64bit machines, but it
318 * may not be true for 32bit machines. We need this padding to
319 * make sure the migration can survive even between 32bit and
322 size
= ROUND_UP(size
, 8);
324 qemu_put_be64(file
, size
);
325 qemu_put_buffer(file
, (const uint8_t *)le_bitmap
, size
);
328 * Mark as an end, in case the middle part is screwed up due to
329 * some "mysterious" reason.
331 qemu_put_be64(file
, RAMBLOCK_RECV_BITMAP_ENDING
);
332 int ret
= qemu_fflush(file
);
337 return size
+ sizeof(size
);
341 * An outstanding page request, on the source, having been received
344 struct RAMSrcPageRequest
{
349 QSIMPLEQ_ENTRY(RAMSrcPageRequest
) next_req
;
352 /* State of RAM for migration */
355 * PageSearchStatus structures for the channels when send pages.
356 * Protected by the bitmap_mutex.
358 PageSearchStatus pss
[RAM_CHANNEL_MAX
];
359 /* UFFD file descriptor, used in 'write-tracking' migration */
361 /* total ram size in bytes */
362 uint64_t ram_bytes_total
;
363 /* Last block that we have visited searching for dirty pages */
364 RAMBlock
*last_seen_block
;
365 /* Last dirty target page we have sent */
366 ram_addr_t last_page
;
367 /* last ram version we have seen */
368 uint32_t last_version
;
369 /* How many times we have dirty too many pages */
370 int dirty_rate_high_cnt
;
371 /* these variables are used for bitmap sync */
372 /* last time we did a full bitmap_sync */
373 int64_t time_last_bitmap_sync
;
374 /* bytes transferred at start_time */
375 uint64_t bytes_xfer_prev
;
376 /* number of dirty pages since start_time */
377 uint64_t num_dirty_pages_period
;
378 /* xbzrle misses since the beginning of the period */
379 uint64_t xbzrle_cache_miss_prev
;
380 /* Amount of xbzrle pages since the beginning of the period */
381 uint64_t xbzrle_pages_prev
;
382 /* Amount of xbzrle encoded bytes since the beginning of the period */
383 uint64_t xbzrle_bytes_prev
;
384 /* Are we really using XBZRLE (e.g., after the first round). */
386 /* Are we on the last stage of migration */
389 /* total handled target pages at the beginning of period */
390 uint64_t target_page_count_prev
;
391 /* total handled target pages since start */
392 uint64_t target_page_count
;
393 /* number of dirty bits in the bitmap */
394 uint64_t migration_dirty_pages
;
397 * - dirty/clear bitmap
398 * - migration_dirty_pages
401 QemuMutex bitmap_mutex
;
402 /* The RAMBlock used in the last src_page_requests */
403 RAMBlock
*last_req_rb
;
404 /* Queue of outstanding page requests from the destination */
405 QemuMutex src_page_req_mutex
;
406 QSIMPLEQ_HEAD(, RAMSrcPageRequest
) src_page_requests
;
409 * This is only used when postcopy is in recovery phase, to communicate
410 * between the migration thread and the return path thread on dirty
411 * bitmap synchronizations. This field is unused in other stages of
414 unsigned int postcopy_bmap_sync_requested
;
416 typedef struct RAMState RAMState
;
418 static RAMState
*ram_state
;
420 static NotifierWithReturnList precopy_notifier_list
;
422 /* Whether postcopy has queued requests? */
423 static bool postcopy_has_request(RAMState
*rs
)
425 return !QSIMPLEQ_EMPTY_ATOMIC(&rs
->src_page_requests
);
428 void precopy_infrastructure_init(void)
430 notifier_with_return_list_init(&precopy_notifier_list
);
433 void precopy_add_notifier(NotifierWithReturn
*n
)
435 notifier_with_return_list_add(&precopy_notifier_list
, n
);
438 void precopy_remove_notifier(NotifierWithReturn
*n
)
440 notifier_with_return_remove(n
);
443 int precopy_notify(PrecopyNotifyReason reason
, Error
**errp
)
445 PrecopyNotifyData pnd
;
448 return notifier_with_return_list_notify(&precopy_notifier_list
, &pnd
, errp
);
451 uint64_t ram_bytes_remaining(void)
453 return ram_state
? (ram_state
->migration_dirty_pages
* TARGET_PAGE_SIZE
) :
457 void ram_transferred_add(uint64_t bytes
)
459 if (runstate_is_running()) {
460 stat64_add(&mig_stats
.precopy_bytes
, bytes
);
461 } else if (migration_in_postcopy()) {
462 stat64_add(&mig_stats
.postcopy_bytes
, bytes
);
464 stat64_add(&mig_stats
.downtime_bytes
, bytes
);
468 struct MigrationOps
{
469 int (*ram_save_target_page
)(RAMState
*rs
, PageSearchStatus
*pss
);
471 typedef struct MigrationOps MigrationOps
;
473 MigrationOps
*migration_ops
;
475 static int ram_save_host_page_urgent(PageSearchStatus
*pss
);
477 /* NOTE: page is the PFN not real ram_addr_t. */
478 static void pss_init(PageSearchStatus
*pss
, RAMBlock
*rb
, ram_addr_t page
)
482 pss
->complete_round
= false;
486 * Check whether two PSSs are actively sending the same page. Return true
487 * if it is, false otherwise.
489 static bool pss_overlap(PageSearchStatus
*pss1
, PageSearchStatus
*pss2
)
491 return pss1
->host_page_sending
&& pss2
->host_page_sending
&&
492 (pss1
->host_page_start
== pss2
->host_page_start
);
496 * save_page_header: write page header to wire
498 * If this is the 1st block, it also writes the block identification
500 * Returns the number of bytes written
502 * @pss: current PSS channel status
503 * @block: block that contains the page we want to send
504 * @offset: offset inside the block for the page
505 * in the lower bits, it contains flags
507 static size_t save_page_header(PageSearchStatus
*pss
, QEMUFile
*f
,
508 RAMBlock
*block
, ram_addr_t offset
)
511 bool same_block
= (block
== pss
->last_sent_block
);
514 offset
|= RAM_SAVE_FLAG_CONTINUE
;
516 qemu_put_be64(f
, offset
);
520 len
= strlen(block
->idstr
);
521 qemu_put_byte(f
, len
);
522 qemu_put_buffer(f
, (uint8_t *)block
->idstr
, len
);
524 pss
->last_sent_block
= block
;
530 * mig_throttle_guest_down: throttle down the guest
532 * Reduce amount of guest cpu execution to hopefully slow down memory
533 * writes. If guest dirty memory rate is reduced below the rate at
534 * which we can transfer pages to the destination then we should be
535 * able to complete migration. Some workloads dirty memory way too
536 * fast and will not effectively converge, even with auto-converge.
538 static void mig_throttle_guest_down(uint64_t bytes_dirty_period
,
539 uint64_t bytes_dirty_threshold
)
541 uint64_t pct_initial
= migrate_cpu_throttle_initial();
542 uint64_t pct_increment
= migrate_cpu_throttle_increment();
543 bool pct_tailslow
= migrate_cpu_throttle_tailslow();
544 int pct_max
= migrate_max_cpu_throttle();
546 uint64_t throttle_now
= cpu_throttle_get_percentage();
547 uint64_t cpu_now
, cpu_ideal
, throttle_inc
;
549 /* We have not started throttling yet. Let's start it. */
550 if (!cpu_throttle_active()) {
551 cpu_throttle_set(pct_initial
);
553 /* Throttling already on, just increase the rate */
555 throttle_inc
= pct_increment
;
557 /* Compute the ideal CPU percentage used by Guest, which may
558 * make the dirty rate match the dirty rate threshold. */
559 cpu_now
= 100 - throttle_now
;
560 cpu_ideal
= cpu_now
* (bytes_dirty_threshold
* 1.0 /
562 throttle_inc
= MIN(cpu_now
- cpu_ideal
, pct_increment
);
564 cpu_throttle_set(MIN(throttle_now
+ throttle_inc
, pct_max
));
568 void mig_throttle_counter_reset(void)
570 RAMState
*rs
= ram_state
;
572 rs
->time_last_bitmap_sync
= qemu_clock_get_ms(QEMU_CLOCK_REALTIME
);
573 rs
->num_dirty_pages_period
= 0;
574 rs
->bytes_xfer_prev
= migration_transferred_bytes();
578 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
580 * @current_addr: address for the zero page
582 * Update the xbzrle cache to reflect a page that's been sent as all 0.
583 * The important thing is that a stale (not-yet-0'd) page be replaced
585 * As a bonus, if the page wasn't in the cache it gets added so that
586 * when a small write is made into the 0'd page it gets XBZRLE sent.
588 static void xbzrle_cache_zero_page(ram_addr_t current_addr
)
590 /* We don't care if this fails to allocate a new cache page
591 * as long as it updated an old one */
592 cache_insert(XBZRLE
.cache
, current_addr
, XBZRLE
.zero_target_page
,
593 stat64_get(&mig_stats
.dirty_sync_count
));
596 #define ENCODING_FLAG_XBZRLE 0x1
599 * save_xbzrle_page: compress and send current page
601 * Returns: 1 means that we wrote the page
602 * 0 means that page is identical to the one already sent
603 * -1 means that xbzrle would be longer than normal
605 * @rs: current RAM state
606 * @pss: current PSS channel
607 * @current_data: pointer to the address of the page contents
608 * @current_addr: addr of the page
609 * @block: block that contains the page we want to send
610 * @offset: offset inside the block for the page
612 static int save_xbzrle_page(RAMState
*rs
, PageSearchStatus
*pss
,
613 uint8_t **current_data
, ram_addr_t current_addr
,
614 RAMBlock
*block
, ram_addr_t offset
)
616 int encoded_len
= 0, bytes_xbzrle
;
617 uint8_t *prev_cached_page
;
618 QEMUFile
*file
= pss
->pss_channel
;
619 uint64_t generation
= stat64_get(&mig_stats
.dirty_sync_count
);
621 if (!cache_is_cached(XBZRLE
.cache
, current_addr
, generation
)) {
622 xbzrle_counters
.cache_miss
++;
623 if (!rs
->last_stage
) {
624 if (cache_insert(XBZRLE
.cache
, current_addr
, *current_data
,
628 /* update *current_data when the page has been
629 inserted into cache */
630 *current_data
= get_cached_data(XBZRLE
.cache
, current_addr
);
637 * Reaching here means the page has hit the xbzrle cache, no matter what
638 * encoding result it is (normal encoding, overflow or skipping the page),
639 * count the page as encoded. This is used to calculate the encoding rate.
641 * Example: 2 pages (8KB) being encoded, first page encoding generates 2KB,
642 * 2nd page turns out to be skipped (i.e. no new bytes written to the
643 * page), the overall encoding rate will be 8KB / 2KB = 4, which has the
644 * skipped page included. In this way, the encoding rate can tell if the
645 * guest page is good for xbzrle encoding.
647 xbzrle_counters
.pages
++;
648 prev_cached_page
= get_cached_data(XBZRLE
.cache
, current_addr
);
650 /* save current buffer into memory */
651 memcpy(XBZRLE
.current_buf
, *current_data
, TARGET_PAGE_SIZE
);
653 /* XBZRLE encoding (if there is no overflow) */
654 encoded_len
= xbzrle_encode_buffer(prev_cached_page
, XBZRLE
.current_buf
,
655 TARGET_PAGE_SIZE
, XBZRLE
.encoded_buf
,
659 * Update the cache contents, so that it corresponds to the data
660 * sent, in all cases except where we skip the page.
662 if (!rs
->last_stage
&& encoded_len
!= 0) {
663 memcpy(prev_cached_page
, XBZRLE
.current_buf
, TARGET_PAGE_SIZE
);
665 * In the case where we couldn't compress, ensure that the caller
666 * sends the data from the cache, since the guest might have
667 * changed the RAM since we copied it.
669 *current_data
= prev_cached_page
;
672 if (encoded_len
== 0) {
673 trace_save_xbzrle_page_skipping();
675 } else if (encoded_len
== -1) {
676 trace_save_xbzrle_page_overflow();
677 xbzrle_counters
.overflow
++;
678 xbzrle_counters
.bytes
+= TARGET_PAGE_SIZE
;
682 /* Send XBZRLE based compressed page */
683 bytes_xbzrle
= save_page_header(pss
, pss
->pss_channel
, block
,
684 offset
| RAM_SAVE_FLAG_XBZRLE
);
685 qemu_put_byte(file
, ENCODING_FLAG_XBZRLE
);
686 qemu_put_be16(file
, encoded_len
);
687 qemu_put_buffer(file
, XBZRLE
.encoded_buf
, encoded_len
);
688 bytes_xbzrle
+= encoded_len
+ 1 + 2;
690 * Like compressed_size (please see update_compress_thread_counts),
691 * the xbzrle encoded bytes don't count the 8 byte header with
692 * RAM_SAVE_FLAG_CONTINUE.
694 xbzrle_counters
.bytes
+= bytes_xbzrle
- 8;
695 ram_transferred_add(bytes_xbzrle
);
701 * pss_find_next_dirty: find the next dirty page of current ramblock
703 * This function updates pss->page to point to the next dirty page index
704 * within the ramblock to migrate, or the end of ramblock when nothing
705 * found. Note that when pss->host_page_sending==true it means we're
706 * during sending a host page, so we won't look for dirty page that is
707 * outside the host page boundary.
709 * @pss: the current page search status
711 static void pss_find_next_dirty(PageSearchStatus
*pss
)
713 RAMBlock
*rb
= pss
->block
;
714 unsigned long size
= rb
->used_length
>> TARGET_PAGE_BITS
;
715 unsigned long *bitmap
= rb
->bmap
;
717 if (migrate_ram_is_ignored(rb
)) {
718 /* Points directly to the end, so we know no dirty page */
724 * If during sending a host page, only look for dirty pages within the
725 * current host page being send.
727 if (pss
->host_page_sending
) {
728 assert(pss
->host_page_end
);
729 size
= MIN(size
, pss
->host_page_end
);
732 pss
->page
= find_next_bit(bitmap
, size
, pss
->page
);
735 static void migration_clear_memory_region_dirty_bitmap(RAMBlock
*rb
,
741 if (!rb
->clear_bmap
|| !clear_bmap_test_and_clear(rb
, page
)) {
745 shift
= rb
->clear_bmap_shift
;
747 * CLEAR_BITMAP_SHIFT_MIN should always guarantee this... this
748 * can make things easier sometimes since then start address
749 * of the small chunk will always be 64 pages aligned so the
750 * bitmap will always be aligned to unsigned long. We should
751 * even be able to remove this restriction but I'm simply
756 size
= 1ULL << (TARGET_PAGE_BITS
+ shift
);
757 start
= QEMU_ALIGN_DOWN((ram_addr_t
)page
<< TARGET_PAGE_BITS
, size
);
758 trace_migration_bitmap_clear_dirty(rb
->idstr
, start
, size
, page
);
759 memory_region_clear_dirty_bitmap(rb
->mr
, start
, size
);
763 migration_clear_memory_region_dirty_bitmap_range(RAMBlock
*rb
,
765 unsigned long npages
)
767 unsigned long i
, chunk_pages
= 1UL << rb
->clear_bmap_shift
;
768 unsigned long chunk_start
= QEMU_ALIGN_DOWN(start
, chunk_pages
);
769 unsigned long chunk_end
= QEMU_ALIGN_UP(start
+ npages
, chunk_pages
);
772 * Clear pages from start to start + npages - 1, so the end boundary is
775 for (i
= chunk_start
; i
< chunk_end
; i
+= chunk_pages
) {
776 migration_clear_memory_region_dirty_bitmap(rb
, i
);
781 * colo_bitmap_find_diry:find contiguous dirty pages from start
783 * Returns the page offset within memory region of the start of the contiguout
786 * @rs: current RAM state
787 * @rb: RAMBlock where to search for dirty pages
788 * @start: page where we start the search
789 * @num: the number of contiguous dirty pages
792 unsigned long colo_bitmap_find_dirty(RAMState
*rs
, RAMBlock
*rb
,
793 unsigned long start
, unsigned long *num
)
795 unsigned long size
= rb
->used_length
>> TARGET_PAGE_BITS
;
796 unsigned long *bitmap
= rb
->bmap
;
797 unsigned long first
, next
;
801 if (migrate_ram_is_ignored(rb
)) {
805 first
= find_next_bit(bitmap
, size
, start
);
809 next
= find_next_zero_bit(bitmap
, size
, first
+ 1);
810 assert(next
>= first
);
815 static inline bool migration_bitmap_clear_dirty(RAMState
*rs
,
822 * Clear dirty bitmap if needed. This _must_ be called before we
823 * send any of the page in the chunk because we need to make sure
824 * we can capture further page content changes when we sync dirty
825 * log the next time. So as long as we are going to send any of
826 * the page in the chunk we clear the remote dirty bitmap for all.
827 * Clearing it earlier won't be a problem, but too late will.
829 migration_clear_memory_region_dirty_bitmap(rb
, page
);
831 ret
= test_and_clear_bit(page
, rb
->bmap
);
833 rs
->migration_dirty_pages
--;
839 static void dirty_bitmap_clear_section(MemoryRegionSection
*section
,
842 const hwaddr offset
= section
->offset_within_region
;
843 const hwaddr size
= int128_get64(section
->size
);
844 const unsigned long start
= offset
>> TARGET_PAGE_BITS
;
845 const unsigned long npages
= size
>> TARGET_PAGE_BITS
;
846 RAMBlock
*rb
= section
->mr
->ram_block
;
847 uint64_t *cleared_bits
= opaque
;
850 * We don't grab ram_state->bitmap_mutex because we expect to run
851 * only when starting migration or during postcopy recovery where
852 * we don't have concurrent access.
854 if (!migration_in_postcopy() && !migrate_background_snapshot()) {
855 migration_clear_memory_region_dirty_bitmap_range(rb
, start
, npages
);
857 *cleared_bits
+= bitmap_count_one_with_offset(rb
->bmap
, start
, npages
);
858 bitmap_clear(rb
->bmap
, start
, npages
);
862 * Exclude all dirty pages from migration that fall into a discarded range as
863 * managed by a RamDiscardManager responsible for the mapped memory region of
864 * the RAMBlock. Clear the corresponding bits in the dirty bitmaps.
866 * Discarded pages ("logically unplugged") have undefined content and must
867 * not get migrated, because even reading these pages for migration might
868 * result in undesired behavior.
870 * Returns the number of cleared bits in the RAMBlock dirty bitmap.
872 * Note: The result is only stable while migrating (precopy/postcopy).
874 static uint64_t ramblock_dirty_bitmap_clear_discarded_pages(RAMBlock
*rb
)
876 uint64_t cleared_bits
= 0;
878 if (rb
->mr
&& rb
->bmap
&& memory_region_has_ram_discard_manager(rb
->mr
)) {
879 RamDiscardManager
*rdm
= memory_region_get_ram_discard_manager(rb
->mr
);
880 MemoryRegionSection section
= {
882 .offset_within_region
= 0,
883 .size
= int128_make64(qemu_ram_get_used_length(rb
)),
886 ram_discard_manager_replay_discarded(rdm
, §ion
,
887 dirty_bitmap_clear_section
,
894 * Check if a host-page aligned page falls into a discarded range as managed by
895 * a RamDiscardManager responsible for the mapped memory region of the RAMBlock.
897 * Note: The result is only stable while migrating (precopy/postcopy).
899 bool ramblock_page_is_discarded(RAMBlock
*rb
, ram_addr_t start
)
901 if (rb
->mr
&& memory_region_has_ram_discard_manager(rb
->mr
)) {
902 RamDiscardManager
*rdm
= memory_region_get_ram_discard_manager(rb
->mr
);
903 MemoryRegionSection section
= {
905 .offset_within_region
= start
,
906 .size
= int128_make64(qemu_ram_pagesize(rb
)),
909 return !ram_discard_manager_is_populated(rdm
, §ion
);
914 /* Called with RCU critical section */
915 static void ramblock_sync_dirty_bitmap(RAMState
*rs
, RAMBlock
*rb
)
917 uint64_t new_dirty_pages
=
918 cpu_physical_memory_sync_dirty_bitmap(rb
, 0, rb
->used_length
);
920 rs
->migration_dirty_pages
+= new_dirty_pages
;
921 rs
->num_dirty_pages_period
+= new_dirty_pages
;
925 * ram_pagesize_summary: calculate all the pagesizes of a VM
927 * Returns a summary bitmap of the page sizes of all RAMBlocks
929 * For VMs with just normal pages this is equivalent to the host page
930 * size. If it's got some huge pages then it's the OR of all the
931 * different page sizes.
933 uint64_t ram_pagesize_summary(void)
936 uint64_t summary
= 0;
938 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
939 summary
|= block
->page_size
;
945 uint64_t ram_get_total_transferred_pages(void)
947 return stat64_get(&mig_stats
.normal_pages
) +
948 stat64_get(&mig_stats
.zero_pages
) +
949 compress_ram_pages() + xbzrle_counters
.pages
;
952 static void migration_update_rates(RAMState
*rs
, int64_t end_time
)
954 uint64_t page_count
= rs
->target_page_count
- rs
->target_page_count_prev
;
956 /* calculate period counters */
957 stat64_set(&mig_stats
.dirty_pages_rate
,
958 rs
->num_dirty_pages_period
* 1000 /
959 (end_time
- rs
->time_last_bitmap_sync
));
965 if (migrate_xbzrle()) {
966 double encoded_size
, unencoded_size
;
968 xbzrle_counters
.cache_miss_rate
= (double)(xbzrle_counters
.cache_miss
-
969 rs
->xbzrle_cache_miss_prev
) / page_count
;
970 rs
->xbzrle_cache_miss_prev
= xbzrle_counters
.cache_miss
;
971 unencoded_size
= (xbzrle_counters
.pages
- rs
->xbzrle_pages_prev
) *
973 encoded_size
= xbzrle_counters
.bytes
- rs
->xbzrle_bytes_prev
;
974 if (xbzrle_counters
.pages
== rs
->xbzrle_pages_prev
|| !encoded_size
) {
975 xbzrle_counters
.encoding_rate
= 0;
977 xbzrle_counters
.encoding_rate
= unencoded_size
/ encoded_size
;
979 rs
->xbzrle_pages_prev
= xbzrle_counters
.pages
;
980 rs
->xbzrle_bytes_prev
= xbzrle_counters
.bytes
;
982 compress_update_rates(page_count
);
986 * Enable dirty-limit to throttle down the guest
988 static void migration_dirty_limit_guest(void)
991 * dirty page rate quota for all vCPUs fetched from
992 * migration parameter 'vcpu_dirty_limit'
994 static int64_t quota_dirtyrate
;
995 MigrationState
*s
= migrate_get_current();
998 * If dirty limit already enabled and migration parameter
999 * vcpu-dirty-limit untouched.
1001 if (dirtylimit_in_service() &&
1002 quota_dirtyrate
== s
->parameters
.vcpu_dirty_limit
) {
1006 quota_dirtyrate
= s
->parameters
.vcpu_dirty_limit
;
1009 * Set all vCPU a quota dirtyrate, note that the second
1010 * parameter will be ignored if setting all vCPU for the vm
1012 qmp_set_vcpu_dirty_limit(false, -1, quota_dirtyrate
, NULL
);
1013 trace_migration_dirty_limit_guest(quota_dirtyrate
);
1016 static void migration_trigger_throttle(RAMState
*rs
)
1018 uint64_t threshold
= migrate_throttle_trigger_threshold();
1019 uint64_t bytes_xfer_period
=
1020 migration_transferred_bytes() - rs
->bytes_xfer_prev
;
1021 uint64_t bytes_dirty_period
= rs
->num_dirty_pages_period
* TARGET_PAGE_SIZE
;
1022 uint64_t bytes_dirty_threshold
= bytes_xfer_period
* threshold
/ 100;
1024 /* During block migration the auto-converge logic incorrectly detects
1025 * that ram migration makes no progress. Avoid this by disabling the
1026 * throttling logic during the bulk phase of block migration. */
1027 if (blk_mig_bulk_active()) {
1032 * The following detection logic can be refined later. For now:
1033 * Check to see if the ratio between dirtied bytes and the approx.
1034 * amount of bytes that just got transferred since the last time
1035 * we were in this routine reaches the threshold. If that happens
1036 * twice, start or increase throttling.
1038 if ((bytes_dirty_period
> bytes_dirty_threshold
) &&
1039 (++rs
->dirty_rate_high_cnt
>= 2)) {
1040 rs
->dirty_rate_high_cnt
= 0;
1041 if (migrate_auto_converge()) {
1042 trace_migration_throttle();
1043 mig_throttle_guest_down(bytes_dirty_period
,
1044 bytes_dirty_threshold
);
1045 } else if (migrate_dirty_limit()) {
1046 migration_dirty_limit_guest();
1051 static void migration_bitmap_sync(RAMState
*rs
, bool last_stage
)
1056 stat64_add(&mig_stats
.dirty_sync_count
, 1);
1058 if (!rs
->time_last_bitmap_sync
) {
1059 rs
->time_last_bitmap_sync
= qemu_clock_get_ms(QEMU_CLOCK_REALTIME
);
1062 trace_migration_bitmap_sync_start();
1063 memory_global_dirty_log_sync(last_stage
);
1065 qemu_mutex_lock(&rs
->bitmap_mutex
);
1066 WITH_RCU_READ_LOCK_GUARD() {
1067 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
1068 ramblock_sync_dirty_bitmap(rs
, block
);
1070 stat64_set(&mig_stats
.dirty_bytes_last_sync
, ram_bytes_remaining());
1072 qemu_mutex_unlock(&rs
->bitmap_mutex
);
1074 memory_global_after_dirty_log_sync();
1075 trace_migration_bitmap_sync_end(rs
->num_dirty_pages_period
);
1077 end_time
= qemu_clock_get_ms(QEMU_CLOCK_REALTIME
);
1079 /* more than 1 second = 1000 millisecons */
1080 if (end_time
> rs
->time_last_bitmap_sync
+ 1000) {
1081 migration_trigger_throttle(rs
);
1083 migration_update_rates(rs
, end_time
);
1085 rs
->target_page_count_prev
= rs
->target_page_count
;
1087 /* reset period counters */
1088 rs
->time_last_bitmap_sync
= end_time
;
1089 rs
->num_dirty_pages_period
= 0;
1090 rs
->bytes_xfer_prev
= migration_transferred_bytes();
1092 if (migrate_events()) {
1093 uint64_t generation
= stat64_get(&mig_stats
.dirty_sync_count
);
1094 qapi_event_send_migration_pass(generation
);
1098 static void migration_bitmap_sync_precopy(RAMState
*rs
, bool last_stage
)
1100 Error
*local_err
= NULL
;
1103 * The current notifier usage is just an optimization to migration, so we
1104 * don't stop the normal migration process in the error case.
1106 if (precopy_notify(PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC
, &local_err
)) {
1107 error_report_err(local_err
);
1111 migration_bitmap_sync(rs
, last_stage
);
1113 if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC
, &local_err
)) {
1114 error_report_err(local_err
);
1118 void ram_release_page(const char *rbname
, uint64_t offset
)
1120 if (!migrate_release_ram() || !migration_in_postcopy()) {
1124 ram_discard_range(rbname
, offset
, TARGET_PAGE_SIZE
);
1128 * save_zero_page: send the zero page to the stream
1130 * Returns the number of pages written.
1132 * @rs: current RAM state
1133 * @pss: current PSS channel
1134 * @offset: offset inside the block for the page
1136 static int save_zero_page(RAMState
*rs
, PageSearchStatus
*pss
,
1139 uint8_t *p
= pss
->block
->host
+ offset
;
1140 QEMUFile
*file
= pss
->pss_channel
;
1143 if (migrate_zero_page_detection() == ZERO_PAGE_DETECTION_NONE
) {
1147 if (!buffer_is_zero(p
, TARGET_PAGE_SIZE
)) {
1151 stat64_add(&mig_stats
.zero_pages
, 1);
1153 if (migrate_mapped_ram()) {
1154 /* zero pages are not transferred with mapped-ram */
1155 clear_bit_atomic(offset
>> TARGET_PAGE_BITS
, pss
->block
->file_bmap
);
1159 len
+= save_page_header(pss
, file
, pss
->block
, offset
| RAM_SAVE_FLAG_ZERO
);
1160 qemu_put_byte(file
, 0);
1162 ram_release_page(pss
->block
->idstr
, offset
);
1163 ram_transferred_add(len
);
1166 * Must let xbzrle know, otherwise a previous (now 0'd) cached
1167 * page would be stale.
1169 if (rs
->xbzrle_started
) {
1170 XBZRLE_cache_lock();
1171 xbzrle_cache_zero_page(pss
->block
->offset
+ offset
);
1172 XBZRLE_cache_unlock();
1179 * @pages: the number of pages written by the control path,
1181 * > 0 - number of pages written
1183 * Return true if the pages has been saved, otherwise false is returned.
1185 static bool control_save_page(PageSearchStatus
*pss
,
1186 ram_addr_t offset
, int *pages
)
1190 ret
= rdma_control_save_page(pss
->pss_channel
, pss
->block
->offset
, offset
,
1192 if (ret
== RAM_SAVE_CONTROL_NOT_SUPP
) {
1196 if (ret
== RAM_SAVE_CONTROL_DELAYED
) {
1205 * directly send the page to the stream
1207 * Returns the number of pages written.
1209 * @pss: current PSS channel
1210 * @block: block that contains the page we want to send
1211 * @offset: offset inside the block for the page
1212 * @buf: the page to be sent
1213 * @async: send to page asyncly
1215 static int save_normal_page(PageSearchStatus
*pss
, RAMBlock
*block
,
1216 ram_addr_t offset
, uint8_t *buf
, bool async
)
1218 QEMUFile
*file
= pss
->pss_channel
;
1220 if (migrate_mapped_ram()) {
1221 qemu_put_buffer_at(file
, buf
, TARGET_PAGE_SIZE
,
1222 block
->pages_offset
+ offset
);
1223 set_bit(offset
>> TARGET_PAGE_BITS
, block
->file_bmap
);
1225 ram_transferred_add(save_page_header(pss
, pss
->pss_channel
, block
,
1226 offset
| RAM_SAVE_FLAG_PAGE
));
1228 qemu_put_buffer_async(file
, buf
, TARGET_PAGE_SIZE
,
1229 migrate_release_ram() &&
1230 migration_in_postcopy());
1232 qemu_put_buffer(file
, buf
, TARGET_PAGE_SIZE
);
1235 ram_transferred_add(TARGET_PAGE_SIZE
);
1236 stat64_add(&mig_stats
.normal_pages
, 1);
1241 * ram_save_page: send the given page to the stream
1243 * Returns the number of pages written.
1245 * >=0 - Number of pages written - this might legally be 0
1246 * if xbzrle noticed the page was the same.
1248 * @rs: current RAM state
1249 * @block: block that contains the page we want to send
1250 * @offset: offset inside the block for the page
1252 static int ram_save_page(RAMState
*rs
, PageSearchStatus
*pss
)
1256 bool send_async
= true;
1257 RAMBlock
*block
= pss
->block
;
1258 ram_addr_t offset
= ((ram_addr_t
)pss
->page
) << TARGET_PAGE_BITS
;
1259 ram_addr_t current_addr
= block
->offset
+ offset
;
1261 p
= block
->host
+ offset
;
1262 trace_ram_save_page(block
->idstr
, (uint64_t)offset
, p
);
1264 XBZRLE_cache_lock();
1265 if (rs
->xbzrle_started
&& !migration_in_postcopy()) {
1266 pages
= save_xbzrle_page(rs
, pss
, &p
, current_addr
,
1268 if (!rs
->last_stage
) {
1269 /* Can't send this cached data async, since the cache page
1270 * might get updated before it gets to the wire
1276 /* XBZRLE overflow or normal page */
1278 pages
= save_normal_page(pss
, block
, offset
, p
, send_async
);
1281 XBZRLE_cache_unlock();
1286 static int ram_save_multifd_page(RAMBlock
*block
, ram_addr_t offset
)
1288 if (!multifd_queue_page(block
, offset
)) {
1295 int compress_send_queued_data(CompressParam
*param
)
1297 PageSearchStatus
*pss
= &ram_state
->pss
[RAM_CHANNEL_PRECOPY
];
1298 MigrationState
*ms
= migrate_get_current();
1299 QEMUFile
*file
= ms
->to_dst_file
;
1302 RAMBlock
*block
= param
->block
;
1303 ram_addr_t offset
= param
->offset
;
1305 if (param
->result
== RES_NONE
) {
1309 assert(block
== pss
->last_sent_block
);
1311 if (param
->result
== RES_ZEROPAGE
) {
1312 assert(qemu_file_buffer_empty(param
->file
));
1313 len
+= save_page_header(pss
, file
, block
, offset
| RAM_SAVE_FLAG_ZERO
);
1314 qemu_put_byte(file
, 0);
1316 ram_release_page(block
->idstr
, offset
);
1317 } else if (param
->result
== RES_COMPRESS
) {
1318 assert(!qemu_file_buffer_empty(param
->file
));
1319 len
+= save_page_header(pss
, file
, block
,
1320 offset
| RAM_SAVE_FLAG_COMPRESS_PAGE
);
1321 len
+= qemu_put_qemu_file(file
, param
->file
);
1326 update_compress_thread_counts(param
, len
);
1331 #define PAGE_ALL_CLEAN 0
1332 #define PAGE_TRY_AGAIN 1
1333 #define PAGE_DIRTY_FOUND 2
1335 * find_dirty_block: find the next dirty page and update any state
1336 * associated with the search process.
1339 * <0: An error happened
1340 * PAGE_ALL_CLEAN: no dirty page found, give up
1341 * PAGE_TRY_AGAIN: no dirty page found, retry for next block
1342 * PAGE_DIRTY_FOUND: dirty page found
1344 * @rs: current RAM state
1345 * @pss: data about the state of the current dirty page scan
1346 * @again: set to false if the search has scanned the whole of RAM
1348 static int find_dirty_block(RAMState
*rs
, PageSearchStatus
*pss
)
1350 /* Update pss->page for the next dirty bit in ramblock */
1351 pss_find_next_dirty(pss
);
1353 if (pss
->complete_round
&& pss
->block
== rs
->last_seen_block
&&
1354 pss
->page
>= rs
->last_page
) {
1356 * We've been once around the RAM and haven't found anything.
1359 return PAGE_ALL_CLEAN
;
1361 if (!offset_in_ramblock(pss
->block
,
1362 ((ram_addr_t
)pss
->page
) << TARGET_PAGE_BITS
)) {
1363 /* Didn't find anything in this RAM Block */
1365 pss
->block
= QLIST_NEXT_RCU(pss
->block
, next
);
1367 if (migrate_multifd() &&
1368 (!migrate_multifd_flush_after_each_section() ||
1369 migrate_mapped_ram())) {
1370 QEMUFile
*f
= rs
->pss
[RAM_CHANNEL_PRECOPY
].pss_channel
;
1371 int ret
= multifd_send_sync_main();
1376 if (!migrate_mapped_ram()) {
1377 qemu_put_be64(f
, RAM_SAVE_FLAG_MULTIFD_FLUSH
);
1382 * If memory migration starts over, we will meet a dirtied page
1383 * which may still exists in compression threads's ring, so we
1384 * should flush the compressed data to make sure the new page
1385 * is not overwritten by the old one in the destination.
1387 * Also If xbzrle is on, stop using the data compression at this
1388 * point. In theory, xbzrle can do better than compression.
1390 compress_flush_data();
1392 /* Hit the end of the list */
1393 pss
->block
= QLIST_FIRST_RCU(&ram_list
.blocks
);
1394 /* Flag that we've looped */
1395 pss
->complete_round
= true;
1396 /* After the first round, enable XBZRLE. */
1397 if (migrate_xbzrle()) {
1398 rs
->xbzrle_started
= true;
1401 /* Didn't find anything this time, but try again on the new block */
1402 return PAGE_TRY_AGAIN
;
1404 /* We've found something */
1405 return PAGE_DIRTY_FOUND
;
1410 * unqueue_page: gets a page of the queue
1412 * Helper for 'get_queued_page' - gets a page off the queue
1414 * Returns the block of the page (or NULL if none available)
1416 * @rs: current RAM state
1417 * @offset: used to return the offset within the RAMBlock
1419 static RAMBlock
*unqueue_page(RAMState
*rs
, ram_addr_t
*offset
)
1421 struct RAMSrcPageRequest
*entry
;
1422 RAMBlock
*block
= NULL
;
1424 if (!postcopy_has_request(rs
)) {
1428 QEMU_LOCK_GUARD(&rs
->src_page_req_mutex
);
1431 * This should _never_ change even after we take the lock, because no one
1432 * should be taking anything off the request list other than us.
1434 assert(postcopy_has_request(rs
));
1436 entry
= QSIMPLEQ_FIRST(&rs
->src_page_requests
);
1438 *offset
= entry
->offset
;
1440 if (entry
->len
> TARGET_PAGE_SIZE
) {
1441 entry
->len
-= TARGET_PAGE_SIZE
;
1442 entry
->offset
+= TARGET_PAGE_SIZE
;
1444 memory_region_unref(block
->mr
);
1445 QSIMPLEQ_REMOVE_HEAD(&rs
->src_page_requests
, next_req
);
1447 migration_consume_urgent_request();
1453 #if defined(__linux__)
1455 * poll_fault_page: try to get next UFFD write fault page and, if pending fault
1456 * is found, return RAM block pointer and page offset
1458 * Returns pointer to the RAMBlock containing faulting page,
1459 * NULL if no write faults are pending
1461 * @rs: current RAM state
1462 * @offset: page offset from the beginning of the block
1464 static RAMBlock
*poll_fault_page(RAMState
*rs
, ram_addr_t
*offset
)
1466 struct uffd_msg uffd_msg
;
1471 if (!migrate_background_snapshot()) {
1475 res
= uffd_read_events(rs
->uffdio_fd
, &uffd_msg
, 1);
1480 page_address
= (void *)(uintptr_t) uffd_msg
.arg
.pagefault
.address
;
1481 block
= qemu_ram_block_from_host(page_address
, false, offset
);
1482 assert(block
&& (block
->flags
& RAM_UF_WRITEPROTECT
) != 0);
1487 * ram_save_release_protection: release UFFD write protection after
1488 * a range of pages has been saved
1490 * @rs: current RAM state
1491 * @pss: page-search-status structure
1492 * @start_page: index of the first page in the range relative to pss->block
1494 * Returns 0 on success, negative value in case of an error
1496 static int ram_save_release_protection(RAMState
*rs
, PageSearchStatus
*pss
,
1497 unsigned long start_page
)
1501 /* Check if page is from UFFD-managed region. */
1502 if (pss
->block
->flags
& RAM_UF_WRITEPROTECT
) {
1503 void *page_address
= pss
->block
->host
+ (start_page
<< TARGET_PAGE_BITS
);
1504 uint64_t run_length
= (pss
->page
- start_page
) << TARGET_PAGE_BITS
;
1506 /* Flush async buffers before un-protect. */
1507 qemu_fflush(pss
->pss_channel
);
1508 /* Un-protect memory range. */
1509 res
= uffd_change_protection(rs
->uffdio_fd
, page_address
, run_length
,
1516 /* ram_write_tracking_available: check if kernel supports required UFFD features
1518 * Returns true if supports, false otherwise
1520 bool ram_write_tracking_available(void)
1522 uint64_t uffd_features
;
1525 res
= uffd_query_features(&uffd_features
);
1527 (uffd_features
& UFFD_FEATURE_PAGEFAULT_FLAG_WP
) != 0);
1530 /* ram_write_tracking_compatible: check if guest configuration is
1531 * compatible with 'write-tracking'
1533 * Returns true if compatible, false otherwise
1535 bool ram_write_tracking_compatible(void)
1537 const uint64_t uffd_ioctls_mask
= BIT(_UFFDIO_WRITEPROTECT
);
1542 /* Open UFFD file descriptor */
1543 uffd_fd
= uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP
, false);
1548 RCU_READ_LOCK_GUARD();
1550 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
1551 uint64_t uffd_ioctls
;
1553 /* Nothing to do with read-only and MMIO-writable regions */
1554 if (block
->mr
->readonly
|| block
->mr
->rom_device
) {
1557 /* Try to register block memory via UFFD-IO to track writes */
1558 if (uffd_register_memory(uffd_fd
, block
->host
, block
->max_length
,
1559 UFFDIO_REGISTER_MODE_WP
, &uffd_ioctls
)) {
1562 if ((uffd_ioctls
& uffd_ioctls_mask
) != uffd_ioctls_mask
) {
1569 uffd_close_fd(uffd_fd
);
1573 static inline void populate_read_range(RAMBlock
*block
, ram_addr_t offset
,
1576 const ram_addr_t end
= offset
+ size
;
1579 * We read one byte of each page; this will preallocate page tables if
1580 * required and populate the shared zeropage on MAP_PRIVATE anonymous memory
1581 * where no page was populated yet. This might require adaption when
1582 * supporting other mappings, like shmem.
1584 for (; offset
< end
; offset
+= block
->page_size
) {
1585 char tmp
= *((char *)block
->host
+ offset
);
1587 /* Don't optimize the read out */
1588 asm volatile("" : "+r" (tmp
));
1592 static inline int populate_read_section(MemoryRegionSection
*section
,
1595 const hwaddr size
= int128_get64(section
->size
);
1596 hwaddr offset
= section
->offset_within_region
;
1597 RAMBlock
*block
= section
->mr
->ram_block
;
1599 populate_read_range(block
, offset
, size
);
1604 * ram_block_populate_read: preallocate page tables and populate pages in the
1605 * RAM block by reading a byte of each page.
1607 * Since it's solely used for userfault_fd WP feature, here we just
1608 * hardcode page size to qemu_real_host_page_size.
1610 * @block: RAM block to populate
1612 static void ram_block_populate_read(RAMBlock
*rb
)
1615 * Skip populating all pages that fall into a discarded range as managed by
1616 * a RamDiscardManager responsible for the mapped memory region of the
1617 * RAMBlock. Such discarded ("logically unplugged") parts of a RAMBlock
1618 * must not get populated automatically. We don't have to track
1619 * modifications via userfaultfd WP reliably, because these pages will
1620 * not be part of the migration stream either way -- see
1621 * ramblock_dirty_bitmap_exclude_discarded_pages().
1623 * Note: The result is only stable while migrating (precopy/postcopy).
1625 if (rb
->mr
&& memory_region_has_ram_discard_manager(rb
->mr
)) {
1626 RamDiscardManager
*rdm
= memory_region_get_ram_discard_manager(rb
->mr
);
1627 MemoryRegionSection section
= {
1629 .offset_within_region
= 0,
1630 .size
= rb
->mr
->size
,
1633 ram_discard_manager_replay_populated(rdm
, §ion
,
1634 populate_read_section
, NULL
);
1636 populate_read_range(rb
, 0, rb
->used_length
);
1641 * ram_write_tracking_prepare: prepare for UFFD-WP memory tracking
1643 void ram_write_tracking_prepare(void)
1647 RCU_READ_LOCK_GUARD();
1649 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
1650 /* Nothing to do with read-only and MMIO-writable regions */
1651 if (block
->mr
->readonly
|| block
->mr
->rom_device
) {
1656 * Populate pages of the RAM block before enabling userfault_fd
1659 * This stage is required since ioctl(UFFDIO_WRITEPROTECT) with
1660 * UFFDIO_WRITEPROTECT_MODE_WP mode setting would silently skip
1661 * pages with pte_none() entries in page table.
1663 ram_block_populate_read(block
);
1667 static inline int uffd_protect_section(MemoryRegionSection
*section
,
1670 const hwaddr size
= int128_get64(section
->size
);
1671 const hwaddr offset
= section
->offset_within_region
;
1672 RAMBlock
*rb
= section
->mr
->ram_block
;
1673 int uffd_fd
= (uintptr_t)opaque
;
1675 return uffd_change_protection(uffd_fd
, rb
->host
+ offset
, size
, true,
1679 static int ram_block_uffd_protect(RAMBlock
*rb
, int uffd_fd
)
1681 assert(rb
->flags
& RAM_UF_WRITEPROTECT
);
1683 /* See ram_block_populate_read() */
1684 if (rb
->mr
&& memory_region_has_ram_discard_manager(rb
->mr
)) {
1685 RamDiscardManager
*rdm
= memory_region_get_ram_discard_manager(rb
->mr
);
1686 MemoryRegionSection section
= {
1688 .offset_within_region
= 0,
1689 .size
= rb
->mr
->size
,
1692 return ram_discard_manager_replay_populated(rdm
, §ion
,
1693 uffd_protect_section
,
1694 (void *)(uintptr_t)uffd_fd
);
1696 return uffd_change_protection(uffd_fd
, rb
->host
,
1697 rb
->used_length
, true, false);
1701 * ram_write_tracking_start: start UFFD-WP memory tracking
1703 * Returns 0 for success or negative value in case of error
1705 int ram_write_tracking_start(void)
1708 RAMState
*rs
= ram_state
;
1711 /* Open UFFD file descriptor */
1712 uffd_fd
= uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP
, true);
1716 rs
->uffdio_fd
= uffd_fd
;
1718 RCU_READ_LOCK_GUARD();
1720 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
1721 /* Nothing to do with read-only and MMIO-writable regions */
1722 if (block
->mr
->readonly
|| block
->mr
->rom_device
) {
1726 /* Register block memory with UFFD to track writes */
1727 if (uffd_register_memory(rs
->uffdio_fd
, block
->host
,
1728 block
->max_length
, UFFDIO_REGISTER_MODE_WP
, NULL
)) {
1731 block
->flags
|= RAM_UF_WRITEPROTECT
;
1732 memory_region_ref(block
->mr
);
1734 /* Apply UFFD write protection to the block memory range */
1735 if (ram_block_uffd_protect(block
, uffd_fd
)) {
1739 trace_ram_write_tracking_ramblock_start(block
->idstr
, block
->page_size
,
1740 block
->host
, block
->max_length
);
1746 error_report("ram_write_tracking_start() failed: restoring initial memory state");
1748 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
1749 if ((block
->flags
& RAM_UF_WRITEPROTECT
) == 0) {
1752 uffd_unregister_memory(rs
->uffdio_fd
, block
->host
, block
->max_length
);
1753 /* Cleanup flags and remove reference */
1754 block
->flags
&= ~RAM_UF_WRITEPROTECT
;
1755 memory_region_unref(block
->mr
);
1758 uffd_close_fd(uffd_fd
);
1764 * ram_write_tracking_stop: stop UFFD-WP memory tracking and remove protection
1766 void ram_write_tracking_stop(void)
1768 RAMState
*rs
= ram_state
;
1771 RCU_READ_LOCK_GUARD();
1773 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
1774 if ((block
->flags
& RAM_UF_WRITEPROTECT
) == 0) {
1777 uffd_unregister_memory(rs
->uffdio_fd
, block
->host
, block
->max_length
);
1779 trace_ram_write_tracking_ramblock_stop(block
->idstr
, block
->page_size
,
1780 block
->host
, block
->max_length
);
1782 /* Cleanup flags and remove reference */
1783 block
->flags
&= ~RAM_UF_WRITEPROTECT
;
1784 memory_region_unref(block
->mr
);
1787 /* Finally close UFFD file descriptor */
1788 uffd_close_fd(rs
->uffdio_fd
);
1793 /* No target OS support, stubs just fail or ignore */
1795 static RAMBlock
*poll_fault_page(RAMState
*rs
, ram_addr_t
*offset
)
1803 static int ram_save_release_protection(RAMState
*rs
, PageSearchStatus
*pss
,
1804 unsigned long start_page
)
1813 bool ram_write_tracking_available(void)
1818 bool ram_write_tracking_compatible(void)
1824 int ram_write_tracking_start(void)
1830 void ram_write_tracking_stop(void)
1834 #endif /* defined(__linux__) */
1837 * get_queued_page: unqueue a page from the postcopy requests
1839 * Skips pages that are already sent (!dirty)
1841 * Returns true if a queued page is found
1843 * @rs: current RAM state
1844 * @pss: data about the state of the current dirty page scan
1846 static bool get_queued_page(RAMState
*rs
, PageSearchStatus
*pss
)
1853 block
= unqueue_page(rs
, &offset
);
1855 * We're sending this page, and since it's postcopy nothing else
1856 * will dirty it, and we must make sure it doesn't get sent again
1857 * even if this queue request was received after the background
1858 * search already sent it.
1863 page
= offset
>> TARGET_PAGE_BITS
;
1864 dirty
= test_bit(page
, block
->bmap
);
1866 trace_get_queued_page_not_dirty(block
->idstr
, (uint64_t)offset
,
1869 trace_get_queued_page(block
->idstr
, (uint64_t)offset
, page
);
1873 } while (block
&& !dirty
);
1877 * Poll write faults too if background snapshot is enabled; that's
1878 * when we have vcpus got blocked by the write protected pages.
1880 block
= poll_fault_page(rs
, &offset
);
1885 * We want the background search to continue from the queued page
1886 * since the guest is likely to want other pages near to the page
1887 * it just requested.
1890 pss
->page
= offset
>> TARGET_PAGE_BITS
;
1893 * This unqueued page would break the "one round" check, even is
1896 pss
->complete_round
= false;
1903 * migration_page_queue_free: drop any remaining pages in the ram
1906 * It should be empty at the end anyway, but in error cases there may
1907 * be some left. in case that there is any page left, we drop it.
1910 static void migration_page_queue_free(RAMState
*rs
)
1912 struct RAMSrcPageRequest
*mspr
, *next_mspr
;
1913 /* This queue generally should be empty - but in the case of a failed
1914 * migration might have some droppings in.
1916 RCU_READ_LOCK_GUARD();
1917 QSIMPLEQ_FOREACH_SAFE(mspr
, &rs
->src_page_requests
, next_req
, next_mspr
) {
1918 memory_region_unref(mspr
->rb
->mr
);
1919 QSIMPLEQ_REMOVE_HEAD(&rs
->src_page_requests
, next_req
);
1925 * ram_save_queue_pages: queue the page for transmission
1927 * A request from postcopy destination for example.
1929 * Returns zero on success or negative on error
1931 * @rbname: Name of the RAMBLock of the request. NULL means the
1932 * same that last one.
1933 * @start: starting address from the start of the RAMBlock
1934 * @len: length (in bytes) to send
1936 int ram_save_queue_pages(const char *rbname
, ram_addr_t start
, ram_addr_t len
,
1940 RAMState
*rs
= ram_state
;
1942 stat64_add(&mig_stats
.postcopy_requests
, 1);
1943 RCU_READ_LOCK_GUARD();
1946 /* Reuse last RAMBlock */
1947 ramblock
= rs
->last_req_rb
;
1951 * Shouldn't happen, we can't reuse the last RAMBlock if
1952 * it's the 1st request.
1954 error_setg(errp
, "MIG_RP_MSG_REQ_PAGES has no previous block");
1958 ramblock
= qemu_ram_block_by_name(rbname
);
1961 /* We shouldn't be asked for a non-existent RAMBlock */
1962 error_setg(errp
, "MIG_RP_MSG_REQ_PAGES has no block '%s'", rbname
);
1965 rs
->last_req_rb
= ramblock
;
1967 trace_ram_save_queue_pages(ramblock
->idstr
, start
, len
);
1968 if (!offset_in_ramblock(ramblock
, start
+ len
- 1)) {
1969 error_setg(errp
, "MIG_RP_MSG_REQ_PAGES request overrun, "
1970 "start=" RAM_ADDR_FMT
" len="
1971 RAM_ADDR_FMT
" blocklen=" RAM_ADDR_FMT
,
1972 start
, len
, ramblock
->used_length
);
1977 * When with postcopy preempt, we send back the page directly in the
1980 if (postcopy_preempt_active()) {
1981 ram_addr_t page_start
= start
>> TARGET_PAGE_BITS
;
1982 size_t page_size
= qemu_ram_pagesize(ramblock
);
1983 PageSearchStatus
*pss
= &ram_state
->pss
[RAM_CHANNEL_POSTCOPY
];
1986 qemu_mutex_lock(&rs
->bitmap_mutex
);
1988 pss_init(pss
, ramblock
, page_start
);
1990 * Always use the preempt channel, and make sure it's there. It's
1991 * safe to access without lock, because when rp-thread is running
1992 * we should be the only one who operates on the qemufile
1994 pss
->pss_channel
= migrate_get_current()->postcopy_qemufile_src
;
1995 assert(pss
->pss_channel
);
1998 * It must be either one or multiple of host page size. Just
1999 * assert; if something wrong we're mostly split brain anyway.
2001 assert(len
% page_size
== 0);
2003 if (ram_save_host_page_urgent(pss
)) {
2004 error_setg(errp
, "ram_save_host_page_urgent() failed: "
2005 "ramblock=%s, start_addr=0x"RAM_ADDR_FMT
,
2006 ramblock
->idstr
, start
);
2011 * NOTE: after ram_save_host_page_urgent() succeeded, pss->page
2012 * will automatically be moved and point to the next host page
2013 * we're going to send, so no need to update here.
2015 * Normally QEMU never sends >1 host page in requests, so
2016 * logically we don't even need that as the loop should only
2017 * run once, but just to be consistent.
2021 qemu_mutex_unlock(&rs
->bitmap_mutex
);
2026 struct RAMSrcPageRequest
*new_entry
=
2027 g_new0(struct RAMSrcPageRequest
, 1);
2028 new_entry
->rb
= ramblock
;
2029 new_entry
->offset
= start
;
2030 new_entry
->len
= len
;
2032 memory_region_ref(ramblock
->mr
);
2033 qemu_mutex_lock(&rs
->src_page_req_mutex
);
2034 QSIMPLEQ_INSERT_TAIL(&rs
->src_page_requests
, new_entry
, next_req
);
2035 migration_make_urgent_request();
2036 qemu_mutex_unlock(&rs
->src_page_req_mutex
);
2042 * try to compress the page before posting it out, return true if the page
2043 * has been properly handled by compression, otherwise needs other
2044 * paths to handle it
2046 static bool save_compress_page(RAMState
*rs
, PageSearchStatus
*pss
,
2049 if (!migrate_compress()) {
2054 * When starting the process of a new block, the first page of
2055 * the block should be sent out before other pages in the same
2056 * block, and all the pages in last block should have been sent
2057 * out, keeping this order is important, because the 'cont' flag
2058 * is used to avoid resending the block name.
2060 * We post the fist page as normal page as compression will take
2061 * much CPU resource.
2063 if (pss
->block
!= pss
->last_sent_block
) {
2064 compress_flush_data();
2068 return compress_page_with_multi_thread(pss
->block
, offset
,
2069 compress_send_queued_data
);
2073 * ram_save_target_page_legacy: save one target page
2075 * Returns the number of pages written
2077 * @rs: current RAM state
2078 * @pss: data about the page we want to send
2080 static int ram_save_target_page_legacy(RAMState
*rs
, PageSearchStatus
*pss
)
2082 ram_addr_t offset
= ((ram_addr_t
)pss
->page
) << TARGET_PAGE_BITS
;
2085 if (control_save_page(pss
, offset
, &res
)) {
2089 if (save_compress_page(rs
, pss
, offset
)) {
2093 if (save_zero_page(rs
, pss
, offset
)) {
2097 return ram_save_page(rs
, pss
);
/**
 * ram_save_target_page_multifd: send one target page to multifd workers
 *
 * Returns 1 if the page was queued, -1 otherwise.
 *
 * @rs: current RAM state
 * @pss: data about the page we want to send
 */
static int ram_save_target_page_multifd(RAMState *rs, PageSearchStatus *pss)
{
    RAMBlock *block = pss->block;
    ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;

    /*
     * While using multifd live migration, we still need to handle zero
     * page checking on the migration main thread.
     */
    if (migrate_zero_page_detection() == ZERO_PAGE_DETECTION_LEGACY) {
        if (save_zero_page(rs, pss, offset)) {
            return 1;
        }
    }

    return ram_save_multifd_page(block, offset);
}

/* Should be called before sending a host page */
static void pss_host_page_prepare(PageSearchStatus *pss)
{
    /* How many guest pages are there in one host page? */
    size_t guest_pfns = qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;

    pss->host_page_sending = true;
    if (guest_pfns <= 1) {
        /*
         * This covers both when guest psize == host psize, or when guest
         * has larger psize than the host (guest_pfns==0).
         *
         * For the latter, we always send one whole guest page per
         * iteration of the host page (example: an Alpha VM on x86 host
         * will have guest psize 8K while host psize 4K).
         */
        pss->host_page_start = pss->page;
        pss->host_page_end = pss->page + 1;
    } else {
        /*
         * The host page spans over multiple guest pages, we send them
         * within the same host page iteration.
         */
        pss->host_page_start = ROUND_DOWN(pss->page, guest_pfns);
        pss->host_page_end = ROUND_UP(pss->page + 1, guest_pfns);
    }
}

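/*
 * Worked example (illustrative only, assuming 4KiB target pages and a
 * 2MiB hugepage-backed block): guest_pfns == 2MiB / 4KiB == 512, so for
 * pss->page == 1000 the prepare step above yields
 *
 *   host_page_start = ROUND_DOWN(1000, 512) = 512
 *   host_page_end   = ROUND_UP(1001, 512)   = 1024
 *
 * i.e. one host-page iteration covers target pages [512, 1024).
 */
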
/*
 * Whether the page pointed by PSS is within the host page being sent.
 * Must be called after a previous pss_host_page_prepare().
 */
static bool pss_within_range(PageSearchStatus *pss)
{
    ram_addr_t ram_addr;

    assert(pss->host_page_sending);

    /* Over host-page boundary? */
    if (pss->page >= pss->host_page_end) {
        return false;
    }

    ram_addr = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;

    return offset_in_ramblock(pss->block, ram_addr);
}

static void pss_host_page_finish(PageSearchStatus *pss)
{
    pss->host_page_sending = false;
    /* This is not needed, but just to reset it */
    pss->host_page_start = pss->host_page_end = 0;
}

/*
 * Send an urgent host page specified by `pss'.  Need to be called with
 * bitmap_mutex held.
 *
 * Returns 0 if saving the host page succeeded, negative otherwise.
 */
static int ram_save_host_page_urgent(PageSearchStatus *pss)
{
    bool page_dirty, sent = false;
    RAMState *rs = ram_state;
    int ret = 0;

    trace_postcopy_preempt_send_host_page(pss->block->idstr, pss->page);
    pss_host_page_prepare(pss);

    /*
     * If precopy is sending the same page, let it be done in precopy, or
     * we could send the same page in two channels and none of them will
     * receive the whole page.
     */
    if (pss_overlap(pss, &ram_state->pss[RAM_CHANNEL_PRECOPY])) {
        trace_postcopy_preempt_hit(pss->block->idstr,
                                   pss->page << TARGET_PAGE_BITS);
        return 0;
    }

    do {
        page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page);

        if (page_dirty) {
            /* Be strict to return code; it must be 1, or what else? */
            if (migration_ops->ram_save_target_page(rs, pss) != 1) {
                error_report_once("%s: ram_save_target_page failed", __func__);
                ret = -1;
                goto out;
            }
            sent = true;
        }
        pss_find_next_dirty(pss);
    } while (pss_within_range(pss));
out:
    pss_host_page_finish(pss);
    /* For urgent requests, flush immediately if sent */
    if (sent) {
        qemu_fflush(pss->pss_channel);
    }
    return ret;
}

/**
 * ram_save_host_page: save a whole host page
 *
 * Starting at *offset send pages up to the end of the current host
 * page. It's valid for the initial offset to point into the middle of
 * a host page in which case the remainder of the hostpage is sent.
 * Only dirty target pages are sent. Note that the host page size may
 * be a huge page for this block.
 *
 * The saving stops at the boundary of the used_length of the block
 * if the RAMBlock isn't a multiple of the host page size.
 *
 * The caller must hold ram_state.bitmap_mutex when calling this
 * function.  Note that this function can temporarily release the lock, but
 * when the function returns it will make sure the lock is held again.
 *
 * Returns the number of pages written or negative on error
 *
 * @rs: current RAM state
 * @pss: data about the page we want to send
 */
static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss)
{
    bool page_dirty, preempt_active = postcopy_preempt_active();
    int tmppages, pages = 0;
    size_t pagesize_bits =
        qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
    unsigned long start_page = pss->page;
    int res;

    if (migrate_ram_is_ignored(pss->block)) {
        error_report("block %s should not be migrated !", pss->block->idstr);
        return 0;
    }

    /* Update host page boundary information */
    pss_host_page_prepare(pss);

    do {
        page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page);

        /* Check whether the page is dirty and, if so, send it */
        if (page_dirty) {
            /*
             * Properly yield the lock only in postcopy preempt mode
             * because both migration thread and rp-return thread can
             * operate on the bitmaps.
             */
            if (preempt_active) {
                qemu_mutex_unlock(&rs->bitmap_mutex);
            }
            tmppages = migration_ops->ram_save_target_page(rs, pss);
            if (tmppages >= 0) {
                pages += tmppages;
                /*
                 * Allow rate limiting to happen in the middle of huge pages if
                 * something is sent in the current iteration.
                 */
                if (pagesize_bits > 1 && tmppages > 0) {
                    migration_rate_limit();
                }
            }
            if (preempt_active) {
                qemu_mutex_lock(&rs->bitmap_mutex);
            }
        } else {
            tmppages = 0;
        }

        if (tmppages < 0) {
            pss_host_page_finish(pss);
            return tmppages;
        }

        pss_find_next_dirty(pss);
    } while (pss_within_range(pss));

    pss_host_page_finish(pss);

    res = ram_save_release_protection(rs, pss, start_page);
    return (res < 0 ? res : pages);
}

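/*
 * Worked example (illustrative only): for a 2MiB hugepage-backed block
 * with 4KiB target pages, pagesize_bits == 512, so the loop above may
 * call migration_rate_limit() between target pages of the same huge
 * page; for a regular 4KiB block, pagesize_bits == 1 and the host page
 * is a single target page.
 */
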
/**
 * ram_find_and_save_block: finds a dirty page and sends it to f
 *
 * Called within an RCU critical section.
 *
 * Returns the number of pages written where zero means no dirty pages,
 * or negative on error
 *
 * @rs: current RAM state
 *
 * On systems where host-page-size > target-page-size it will send all the
 * pages in a host page that are dirty.
 */
static int ram_find_and_save_block(RAMState *rs)
{
    PageSearchStatus *pss = &rs->pss[RAM_CHANNEL_PRECOPY];
    int pages = 0;

    /* No dirty page as there is zero RAM */
    if (!rs->ram_bytes_total) {
        return pages;
    }

    /*
     * Always keep last_seen_block/last_page valid during this procedure,
     * because find_dirty_block() relies on these values (e.g., we compare
     * last_seen_block with pss.block to see whether we searched all the
     * ramblocks) to detect the completion of migration.  Having NULL value
     * of last_seen_block can conditionally cause below loop to run forever.
     */
    if (!rs->last_seen_block) {
        rs->last_seen_block = QLIST_FIRST_RCU(&ram_list.blocks);
        rs->last_page = 0;
    }

    pss_init(pss, rs->last_seen_block, rs->last_page);

    while (true) {
        if (!get_queued_page(rs, pss)) {
            /* priority queue empty, so just search for something dirty */
            int res = find_dirty_block(rs, pss);
            if (res != PAGE_DIRTY_FOUND) {
                if (res == PAGE_ALL_CLEAN) {
                    break;
                } else if (res == PAGE_TRY_AGAIN) {
                    continue;
                } else if (res < 0) {
                    pages = res;
                    break;
                }
            }
        }
        pages = ram_save_host_page(rs, pss);
        if (pages) {
            break;
        }
    }

    rs->last_seen_block = pss->block;
    rs->last_page = pss->page;

    return pages;
}

static uint64_t ram_bytes_total_with_ignored(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        total += block->used_length;
    }
    return total;
}

uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        total += block->used_length;
    }
    return total;
}

static void xbzrle_load_setup(void)
{
    XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
}

static void xbzrle_load_cleanup(void)
{
    g_free(XBZRLE.decoded_buf);
    XBZRLE.decoded_buf = NULL;
}

static void ram_state_cleanup(RAMState **rsp)
{
    if (*rsp) {
        migration_page_queue_free(*rsp);
        qemu_mutex_destroy(&(*rsp)->bitmap_mutex);
        qemu_mutex_destroy(&(*rsp)->src_page_req_mutex);
        g_free(*rsp);
        *rsp = NULL;
    }
}

static void xbzrle_cleanup(void)
{
    XBZRLE_cache_lock();

    cache_fini(XBZRLE.cache);
    g_free(XBZRLE.encoded_buf);
    g_free(XBZRLE.current_buf);
    g_free(XBZRLE.zero_target_page);
    XBZRLE.cache = NULL;
    XBZRLE.encoded_buf = NULL;
    XBZRLE.current_buf = NULL;
    XBZRLE.zero_target_page = NULL;

    XBZRLE_cache_unlock();
}

static void ram_save_cleanup(void *opaque)
{
    RAMState **rsp = opaque;
    RAMBlock *block;

    /* We don't use dirty log with background snapshots */
    if (!migrate_background_snapshot()) {
        /*
         * The caller holds the BQL or is in a BH, so there is
         * no writing race against the migration bitmap
         */
        if (global_dirty_tracking & GLOBAL_DIRTY_MIGRATION) {
            /*
             * do not stop dirty log without starting it, since
             * memory_global_dirty_log_stop will assert that
             * memory_global_dirty_log_start/stop used in pairs
             */
            memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
        }
    }

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        g_free(block->clear_bmap);
        block->clear_bmap = NULL;
        g_free(block->bmap);
        block->bmap = NULL;
    }

    compress_threads_save_cleanup();
    ram_state_cleanup(rsp);
    g_free(migration_ops);
    migration_ops = NULL;
}

static void ram_state_reset(RAMState *rs)
{
    int i;

    for (i = 0; i < RAM_CHANNEL_MAX; i++) {
        rs->pss[i].last_sent_block = NULL;
    }

    rs->last_seen_block = NULL;
    rs->last_page = 0;
    rs->last_version = ram_list.version;
    rs->xbzrle_started = false;
}

#define MAX_WAIT 50 /* ms, half buffered_file limit */

/* **** functions for postcopy ***** */

void ram_postcopy_migrated_memory_release(MigrationState *ms)
{
    struct RAMBlock *block;

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        unsigned long *bitmap = block->bmap;
        unsigned long range = block->used_length >> TARGET_PAGE_BITS;
        unsigned long run_start = find_next_zero_bit(bitmap, range, 0);

        while (run_start < range) {
            unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
            ram_discard_range(block->idstr,
                              ((ram_addr_t)run_start) << TARGET_PAGE_BITS,
                              ((ram_addr_t)(run_end - run_start))
                                << TARGET_PAGE_BITS);
            run_start = find_next_zero_bit(bitmap, range, run_end + 1);
        }
    }
}

/**
 * postcopy_send_discard_bm_ram: discard a RAMBlock
 *
 * Callback from postcopy_each_ram_send_discard for each RAMBlock
 *
 * @ms: current migration state
 * @block: RAMBlock to discard
 */
static void postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block)
{
    unsigned long end = block->used_length >> TARGET_PAGE_BITS;
    unsigned long current;
    unsigned long *bitmap = block->bmap;

    for (current = 0; current < end; ) {
        unsigned long one = find_next_bit(bitmap, end, current);
        unsigned long zero, discard_length;

        if (one >= end) {
            break;
        }

        zero = find_next_zero_bit(bitmap, end, one + 1);

        if (zero >= end) {
            discard_length = end - one;
        } else {
            discard_length = zero - one;
        }
        postcopy_discard_send_range(ms, one, discard_length);
        current = one + discard_length;
    }
}

static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block);

/**
 * postcopy_each_ram_send_discard: discard all RAMBlocks
 *
 * Utility for the outgoing postcopy code.
 *   Calls postcopy_send_discard_bm_ram for each RAMBlock
 *   passing it bitmap indexes and name.
 * (qemu_ram_foreach_block ends up passing unscaled lengths
 *  which would mean postcopy code would have to deal with target page)
 *
 * @ms: current migration state
 */
static void postcopy_each_ram_send_discard(MigrationState *ms)
{
    struct RAMBlock *block;

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        postcopy_discard_send_init(ms, block->idstr);

        /*
         * Deal with TPS != HPS and huge pages.  It discards any partially
         * sent host-page size chunks and marks any partially dirty
         * host-page size chunks as all dirty.  In this case the host-page
         * is the host-page for the particular RAMBlock, i.e. it might be a
         * huge page.
         */
        postcopy_chunk_hostpages_pass(ms, block);

        /*
         * Postcopy sends chunks of bitmap over the wire, but it
         * just needs indexes at this point, avoids it having
         * target page specific code.
         */
        postcopy_send_discard_bm_ram(ms, block);
        postcopy_discard_send_finish(ms);
    }
}

/**
 * postcopy_chunk_hostpages_pass: canonicalize bitmap in hostpages
 *
 * Helper for postcopy_chunk_hostpages; it's called twice to
 * canonicalize the two bitmaps, that are similar, but one is
 * inverted.
 *
 * Postcopy requires that all target pages in a hostpage are dirty or
 * clean, not a mix.  This function canonicalizes the bitmaps.
 *
 * @ms: current migration state
 * @block: block that contains the page we want to canonicalize
 */
static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block)
{
    RAMState *rs = ram_state;
    unsigned long *bitmap = block->bmap;
    unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
    unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
    unsigned long run_start;

    if (block->page_size == TARGET_PAGE_SIZE) {
        /* Easy case - TPS==HPS for a non-huge page RAMBlock */
        return;
    }

    /* Find a dirty page */
    run_start = find_next_bit(bitmap, pages, 0);

    while (run_start < pages) {

        /*
         * If the start of this run of pages is in the middle of a host
         * page, then we need to fixup this host page.
         */
        if (QEMU_IS_ALIGNED(run_start, host_ratio)) {
            /* Find the end of this run */
            run_start = find_next_zero_bit(bitmap, pages, run_start + 1);
            /*
             * If the end isn't at the start of a host page, then the
             * run doesn't finish at the end of a host page
             * and we need to discard.
             */
        }

        if (!QEMU_IS_ALIGNED(run_start, host_ratio)) {
            unsigned long page;
            unsigned long fixup_start_addr = QEMU_ALIGN_DOWN(run_start,
                                                             host_ratio);
            run_start = QEMU_ALIGN_UP(run_start, host_ratio);

            /* Clean up the bitmap */
            for (page = fixup_start_addr;
                 page < fixup_start_addr + host_ratio; page++) {
                /*
                 * Remark them as dirty, updating the count for any pages
                 * that weren't previously dirty.
                 */
                rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
            }
        }

        /* Find the next dirty page for the next iteration */
        run_start = find_next_bit(bitmap, pages, run_start);
    }
}

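/*
 * Worked example (illustrative only): with host_ratio == 4 (16KiB host
 * pages, 4KiB target pages) and only target pages 5 and 6 dirty, the
 * pass above re-marks pages 4..7 dirty, so the whole host page is later
 * either sent or discarded as one unit.
 */
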
/**
 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
 *
 * Transmit the set of pages to be discarded after precopy to the target
 * these are pages that:
 *     a) Have been previously transmitted but are now dirty again
 *     b) Pages that have never been transmitted, this ensures that
 *        any pages on the destination that have been mapped by background
 *        tasks get discarded (transparent huge pages is the specific concern)
 * Hopefully this is pretty sparse
 *
 * @ms: current migration state
 */
void ram_postcopy_send_discard_bitmap(MigrationState *ms)
{
    RAMState *rs = ram_state;

    RCU_READ_LOCK_GUARD();

    /* This should be our last sync, the src is now paused */
    migration_bitmap_sync(rs, false);

    /* Easiest way to make sure we don't resume in the middle of a host-page */
    rs->pss[RAM_CHANNEL_PRECOPY].last_sent_block = NULL;
    rs->last_seen_block = NULL;
    rs->last_page = 0;

    postcopy_each_ram_send_discard(ms);

    trace_ram_postcopy_send_discard_bitmap();
}

/**
 * ram_discard_range: discard dirtied pages at the beginning of postcopy
 *
 * Returns zero on success
 *
 * @rbname: name of the RAMBlock of the request. NULL means the
 *          same as the last one.
 * @start: RAMBlock starting page
 * @length: RAMBlock size
 */
int ram_discard_range(const char *rbname, uint64_t start, size_t length)
{
    trace_ram_discard_range(rbname, start, length);

    RCU_READ_LOCK_GUARD();
    RAMBlock *rb = qemu_ram_block_by_name(rbname);

    if (!rb) {
        error_report("ram_discard_range: Failed to find block '%s'", rbname);
        return -1;
    }

    /*
     * On source VM, we don't need to update the received bitmap since
     * we don't even have one.
     */
    if (rb->receivedmap) {
        bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
                     length >> qemu_target_page_bits());
    }

    return ram_block_discard_range(rb, start, length);
}

/*
 * For every allocation, we will try not to crash the VM if the
 * allocation failed.
 */
static int xbzrle_init(void)
{
    Error *local_err = NULL;

    if (!migrate_xbzrle()) {
        return 0;
    }

    XBZRLE_cache_lock();

    XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE);
    if (!XBZRLE.zero_target_page) {
        error_report("%s: Error allocating zero page", __func__);
        goto err_out;
    }

    XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(),
                              TARGET_PAGE_SIZE, &local_err);
    if (!XBZRLE.cache) {
        error_report_err(local_err);
        goto free_zero_page;
    }

    XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
    if (!XBZRLE.encoded_buf) {
        error_report("%s: Error allocating encoded_buf", __func__);
        goto free_cache;
    }

    XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
    if (!XBZRLE.current_buf) {
        error_report("%s: Error allocating current_buf", __func__);
        goto free_encoded_buf;
    }

    /* We are all good */
    XBZRLE_cache_unlock();
    return 0;

free_encoded_buf:
    g_free(XBZRLE.encoded_buf);
    XBZRLE.encoded_buf = NULL;
free_cache:
    cache_fini(XBZRLE.cache);
    XBZRLE.cache = NULL;
free_zero_page:
    g_free(XBZRLE.zero_target_page);
    XBZRLE.zero_target_page = NULL;
err_out:
    XBZRLE_cache_unlock();
    return -ENOMEM;
}

static int ram_state_init(RAMState **rsp)
{
    *rsp = g_try_new0(RAMState, 1);

    if (!*rsp) {
        error_report("%s: Init ramstate fail", __func__);
        return -1;
    }

    qemu_mutex_init(&(*rsp)->bitmap_mutex);
    qemu_mutex_init(&(*rsp)->src_page_req_mutex);
    QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
    (*rsp)->ram_bytes_total = ram_bytes_total();

    /*
     * Count the total number of pages used by ram blocks not including any
     * gaps due to alignment or unplugs.
     * This must match with the initial values of dirty bitmap.
     */
    (*rsp)->migration_dirty_pages = (*rsp)->ram_bytes_total >> TARGET_PAGE_BITS;
    ram_state_reset(*rsp);

    return 0;
}

static void ram_list_init_bitmaps(void)
{
    MigrationState *ms = migrate_get_current();
    RAMBlock *block;
    unsigned long pages;
    uint8_t shift;

    /* Skip setting bitmap if there is no RAM */
    if (ram_bytes_total()) {
        shift = ms->clear_bitmap_shift;
        if (shift > CLEAR_BITMAP_SHIFT_MAX) {
            error_report("clear_bitmap_shift (%u) too big, using "
                         "max value (%u)", shift, CLEAR_BITMAP_SHIFT_MAX);
            shift = CLEAR_BITMAP_SHIFT_MAX;
        } else if (shift < CLEAR_BITMAP_SHIFT_MIN) {
            error_report("clear_bitmap_shift (%u) too small, using "
                         "min value (%u)", shift, CLEAR_BITMAP_SHIFT_MIN);
            shift = CLEAR_BITMAP_SHIFT_MIN;
        }

        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
            pages = block->max_length >> TARGET_PAGE_BITS;
            /*
             * The initial dirty bitmap for migration must be set with all
             * ones to make sure we'll migrate every guest RAM page to
             * destination.
             * Here we set RAMBlock.bmap all to 1 because when we restart a
             * migration after a failed one, ram_list.
             * dirty_memory[DIRTY_MEMORY_MIGRATION] does not include the whole
             * guest memory.
             */
            block->bmap = bitmap_new(pages);
            bitmap_set(block->bmap, 0, pages);
            if (migrate_mapped_ram()) {
                block->file_bmap = bitmap_new(pages);
            }
            block->clear_bmap_shift = shift;
            block->clear_bmap = bitmap_new(clear_bmap_size(pages, shift));
        }
    }
}

static void migration_bitmap_clear_discarded_pages(RAMState *rs)
{
    unsigned long pages;
    RAMBlock *rb;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        pages = ramblock_dirty_bitmap_clear_discarded_pages(rb);
        rs->migration_dirty_pages -= pages;
    }
}

static void ram_init_bitmaps(RAMState *rs)
{
    qemu_mutex_lock_ramlist();

    WITH_RCU_READ_LOCK_GUARD() {
        ram_list_init_bitmaps();
        /* We don't use dirty log with background snapshots */
        if (!migrate_background_snapshot()) {
            memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION);
            migration_bitmap_sync_precopy(rs, false);
        }
    }
    qemu_mutex_unlock_ramlist();

    /*
     * After an eventual first bitmap sync, fixup the initial bitmap
     * containing all 1s to exclude any discarded pages from migration.
     */
    migration_bitmap_clear_discarded_pages(rs);
}

static int ram_init_all(RAMState **rsp)
{
    if (ram_state_init(rsp)) {
        return -1;
    }

    if (xbzrle_init()) {
        ram_state_cleanup(rsp);
        return -1;
    }

    ram_init_bitmaps(*rsp);

    return 0;
}

static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out)
{
    RAMBlock *block;
    uint64_t pages = 0;

    /*
     * Postcopy is not using xbzrle/compression, so no need for that.
     * Also, since the source is already halted, we don't need to care
     * about dirty page logging either.
     */

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        pages += bitmap_count_one(block->bmap,
                                  block->used_length >> TARGET_PAGE_BITS);
    }

    /* This may not be aligned with current bitmaps. Recalculate. */
    rs->migration_dirty_pages = pages;

    ram_state_reset(rs);

    /* Update RAMState cache of output QEMUFile */
    rs->pss[RAM_CHANNEL_PRECOPY].pss_channel = out;

    trace_ram_state_resume_prepare(pages);
}

/*
 * This function clears bits of the free pages reported by the caller from the
 * migration dirty bitmap. @addr is the host address corresponding to the
 * start of the continuous guest free pages, and @len is the total bytes of
 * those pages.
 */
void qemu_guest_free_page_hint(void *addr, size_t len)
{
    RAMBlock *block;
    ram_addr_t offset;
    size_t used_len, start, npages;

    /* This function is currently expected to be used during live migration */
    if (!migration_is_setup_or_active()) {
        return;
    }

    for (; len > 0; len -= used_len, addr += used_len) {
        block = qemu_ram_block_from_host(addr, false, &offset);
        if (unlikely(!block || offset >= block->used_length)) {
            /*
             * The implementation might not support RAMBlock resize during
             * live migration, but it could happen in theory with future
             * updates. So we add a check here to capture that case.
             */
            error_report_once("%s unexpected error", __func__);
            return;
        }

        if (len <= block->used_length - offset) {
            used_len = len;
        } else {
            used_len = block->used_length - offset;
        }

        start = offset >> TARGET_PAGE_BITS;
        npages = used_len >> TARGET_PAGE_BITS;

        qemu_mutex_lock(&ram_state->bitmap_mutex);
        /*
         * The skipped free pages are equivalent to having been sent, from
         * clear_bmap's perspective, so clear the bits from the memory region
         * bitmap which are initially set. Otherwise those skipped pages will
         * be sent in the next round after syncing from the memory region
         * bitmap.
         */
        migration_clear_memory_region_dirty_bitmap_range(block, start, npages);
        ram_state->migration_dirty_pages -=
                      bitmap_count_one_with_offset(block->bmap, start, npages);
        bitmap_clear(block->bmap, start, npages);
        qemu_mutex_unlock(&ram_state->bitmap_mutex);
    }
}

#define MAPPED_RAM_HDR_VERSION 1
struct MappedRamHeader {
    uint32_t version;
    /*
     * The target's page size, so we know how many pages are in the
     * bitmap.
     */
    uint64_t page_size;
    /*
     * The offset in the migration file where the pages bitmap is
     * stored.
     */
    uint64_t bitmap_offset;
    /*
     * The offset in the migration file where the actual pages (data)
     * are stored.
     */
    uint64_t pages_offset;
} QEMU_PACKED;
typedef struct MappedRamHeader MappedRamHeader;

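/*
 * Illustrative per-RAMBlock file layout under mapped-ram (assuming the
 * packed header above, i.e. 4 + 8 + 8 + 8 == 28 bytes):
 *
 *   [ MappedRamHeader ][ dirty bitmap ... ][ padding ][ page data ... ]
 *   ^ current offset    ^ bitmap_offset               ^ pages_offset
 *                                                       (aligned)
 */
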
static void mapped_ram_setup_ramblock(QEMUFile *file, RAMBlock *block)
{
    g_autofree MappedRamHeader *header = NULL;
    size_t header_size, bitmap_size;
    long num_pages;

    header = g_new0(MappedRamHeader, 1);
    header_size = sizeof(MappedRamHeader);

    num_pages = block->used_length >> TARGET_PAGE_BITS;
    bitmap_size = BITS_TO_LONGS(num_pages) * sizeof(unsigned long);

    /*
     * Save the file offsets of where the bitmap and the pages should
     * go as they are written at the end of migration and during the
     * iterative phase, respectively.
     */
    block->bitmap_offset = qemu_get_offset(file) + header_size;
    block->pages_offset = ROUND_UP(block->bitmap_offset +
                                   bitmap_size,
                                   MAPPED_RAM_FILE_OFFSET_ALIGNMENT);

    header->version = cpu_to_be32(MAPPED_RAM_HDR_VERSION);
    header->page_size = cpu_to_be64(TARGET_PAGE_SIZE);
    header->bitmap_offset = cpu_to_be64(block->bitmap_offset);
    header->pages_offset = cpu_to_be64(block->pages_offset);

    qemu_put_buffer(file, (uint8_t *) header, header_size);

    /* prepare offset for next ramblock */
    qemu_set_offset(file, block->pages_offset + block->used_length, SEEK_SET);
}

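/*
 * Worked example (illustrative only): for a 4GiB block with 4KiB target
 * pages, num_pages == 1Mi and bitmap_size == 128KiB (0x20000).  If the
 * header is written at file offset 0x100, then
 * bitmap_offset == 0x100 + 28 == 0x11c and, with a 1MiB
 * MAPPED_RAM_FILE_OFFSET_ALIGNMENT,
 * pages_offset == ROUND_UP(0x11c + 0x20000, 0x100000) == 0x100000.
 * The next RAMBlock starts at pages_offset + used_length.
 */
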
static bool mapped_ram_read_header(QEMUFile *file, MappedRamHeader *header,
                                   Error **errp)
{
    size_t ret, header_size = sizeof(MappedRamHeader);

    ret = qemu_get_buffer(file, (uint8_t *)header, header_size);
    if (ret != header_size) {
        error_setg(errp, "Could not read whole mapped-ram migration header "
                   "(expected %zd, got %zd bytes)", header_size, ret);
        return false;
    }

    /* migration stream is big-endian */
    header->version = be32_to_cpu(header->version);

    if (header->version > MAPPED_RAM_HDR_VERSION) {
        error_setg(errp, "Migration mapped-ram capability version not "
                   "supported (expected <= %d, got %d)", MAPPED_RAM_HDR_VERSION,
                   header->version);
        return false;
    }

    header->page_size = be64_to_cpu(header->page_size);
    header->bitmap_offset = be64_to_cpu(header->bitmap_offset);
    header->pages_offset = be64_to_cpu(header->pages_offset);

    return true;
}

/*
 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
 * long-running RCU critical section.  When rcu-reclaims in the code
 * start to become numerous it will be necessary to reduce the
 * granularity of these critical sections.
 */

/**
 * ram_save_setup: Setup RAM for migration
 *
 * Returns zero to indicate success and negative for error
 *
 * @f: QEMUFile where to send the data
 * @opaque: RAMState pointer
 * @errp: pointer to Error*, to store an error if it happens.
 */
static int ram_save_setup(QEMUFile *f, void *opaque, Error **errp)
{
    RAMState **rsp = opaque;
    RAMBlock *block;
    int ret, max_hg_page_size;

    if (compress_threads_save_setup()) {
        error_setg(errp, "%s: failed to start compress threads", __func__);
        return -1;
    }

    /* migration has already setup the bitmap, reuse it. */
    if (!migration_in_colo_state()) {
        if (ram_init_all(rsp) != 0) {
            error_setg(errp, "%s: failed to setup RAM for migration", __func__);
            compress_threads_save_cleanup();
            return -1;
        }
    }
    (*rsp)->pss[RAM_CHANNEL_PRECOPY].pss_channel = f;

    /*
     * ??? Mirrors the previous value of qemu_host_page_size,
     * but is this really what was intended for the migration?
     */
    max_hg_page_size = MAX(qemu_real_host_page_size(), TARGET_PAGE_SIZE);

    WITH_RCU_READ_LOCK_GUARD() {
        qemu_put_be64(f, ram_bytes_total_with_ignored()
                         | RAM_SAVE_FLAG_MEM_SIZE);

        RAMBLOCK_FOREACH_MIGRATABLE(block) {
            qemu_put_byte(f, strlen(block->idstr));
            qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
            qemu_put_be64(f, block->used_length);
            if (migrate_postcopy_ram() &&
                block->page_size != max_hg_page_size) {
                qemu_put_be64(f, block->page_size);
            }
            if (migrate_ignore_shared()) {
                qemu_put_be64(f, block->mr->addr);
            }

            if (migrate_mapped_ram()) {
                mapped_ram_setup_ramblock(f, block);
            }
        }
    }

    ret = rdma_registration_start(f, RAM_CONTROL_SETUP);
    if (ret < 0) {
        error_setg(errp, "%s: failed to start RDMA registration", __func__);
        qemu_file_set_error(f, ret);
        return ret;
    }

    ret = rdma_registration_stop(f, RAM_CONTROL_SETUP);
    if (ret < 0) {
        error_setg(errp, "%s: failed to stop RDMA registration", __func__);
        qemu_file_set_error(f, ret);
        return ret;
    }

    migration_ops = g_malloc0(sizeof(MigrationOps));

    if (migrate_multifd()) {
        migration_ops->ram_save_target_page = ram_save_target_page_multifd;
    } else {
        migration_ops->ram_save_target_page = ram_save_target_page_legacy;
    }

    ret = multifd_send_sync_main();
    if (ret < 0) {
        error_setg(errp, "%s: multifd synchronization failed", __func__);
        return ret;
    }

    if (migrate_multifd() && !migrate_multifd_flush_after_each_section()
        && !migrate_mapped_ram()) {
        qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
    }

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    ret = qemu_fflush(f);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "%s failed", __func__);
    }
    return ret;
}

static void ram_save_file_bmap(QEMUFile *f)
{
    RAMBlock *block;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        long num_pages = block->used_length >> TARGET_PAGE_BITS;
        long bitmap_size = BITS_TO_LONGS(num_pages) * sizeof(unsigned long);

        qemu_put_buffer_at(f, (uint8_t *)block->file_bmap, bitmap_size,
                           block->bitmap_offset);
        ram_transferred_add(bitmap_size);

        /*
         * Free the bitmap here to catch any synchronization issues
         * with multifd channels. No channels should be sending pages
         * after we've written the bitmap to file.
         */
        g_free(block->file_bmap);
        block->file_bmap = NULL;
    }
}

void ramblock_set_file_bmap_atomic(RAMBlock *block, ram_addr_t offset, bool set)
{
    if (set) {
        set_bit_atomic(offset >> TARGET_PAGE_BITS, block->file_bmap);
    } else {
        clear_bit_atomic(offset >> TARGET_PAGE_BITS, block->file_bmap);
    }
}

/**
 * ram_save_iterate: iterative stage for migration
 *
 * Returns zero to indicate success and negative for error
 *
 * @f: QEMUFile where to send the data
 * @opaque: RAMState pointer
 */
static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    RAMState **temp = opaque;
    RAMState *rs = *temp;
    int ret = 0;
    int i;
    int64_t t0;
    int done = 0;

    if (blk_mig_bulk_active()) {
        /* Avoid transferring ram during bulk phase of block migration as
         * the bulk phase will usually take a long time and transferring
         * ram updates during that time is pointless. */
        goto out;
    }

    /*
     * We'll take this lock a little bit long, but it's okay for two reasons.
     * Firstly, the only other thread that can take it is the one calling
     * qemu_guest_free_page_hint(), which should be rare; secondly, see
     * MAX_WAIT (if curious, further see commit 4508bd9ed8053ce) below, which
     * guarantees that we'll at least release it on a regular basis.
     */
    WITH_QEMU_LOCK_GUARD(&rs->bitmap_mutex) {
        WITH_RCU_READ_LOCK_GUARD() {
            if (ram_list.version != rs->last_version) {
                ram_state_reset(rs);
            }

            /* Read version before ram_list.blocks */
            smp_rmb();

            ret = rdma_registration_start(f, RAM_CONTROL_ROUND);
            if (ret < 0) {
                qemu_file_set_error(f, ret);
                goto out;
            }

            t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
            i = 0;
            while ((ret = migration_rate_exceeded(f)) == 0 ||
                   postcopy_has_request(rs)) {
                int pages;

                if (qemu_file_get_error(f)) {
                    break;
                }

                pages = ram_find_and_save_block(rs);
                /* no more pages to send */
                if (pages == 0) {
                    done = 1;
                    break;
                }

                if (pages < 0) {
                    qemu_file_set_error(f, pages);
                    break;
                }

                rs->target_page_count += pages;

                /*
                 * During postcopy, it is necessary to make sure one whole host
                 * page is sent in one chunk.
                 */
                if (migrate_postcopy_ram()) {
                    compress_flush_data();
                }

                /*
                 * we want to check in the 1st loop, just in case it was the 1st
                 * time and we had to sync the dirty bitmap.
                 * qemu_clock_get_ns() is a bit expensive, so we only check
                 * every few iterations
                 */
                if ((i & 63) == 0) {
                    uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) /
                                  1000000;
                    if (t1 > MAX_WAIT) {
                        trace_ram_save_iterate_big_wait(t1, i);
                        break;
                    }
                }
                i++;
            }
        }
    }

    /*
     * Must occur before EOS (or any QEMUFile operation)
     * because of RDMA protocol.
     */
    ret = rdma_registration_stop(f, RAM_CONTROL_ROUND);
    if (ret < 0) {
        qemu_file_set_error(f, ret);
    }

out:
    if (ret >= 0
        && migration_is_setup_or_active()) {
        if (migrate_multifd() && migrate_multifd_flush_after_each_section() &&
            !migrate_mapped_ram()) {
            ret = multifd_send_sync_main();
            if (ret < 0) {
                return ret;
            }
        }

        qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
        ram_transferred_add(8);

        ret = qemu_fflush(f);
    }
    if (ret < 0) {
        return ret;
    }

    return done;
}

/**
 * ram_save_complete: function called to send the remaining amount of ram
 *
 * Returns zero to indicate success or negative on error
 *
 * Called with the BQL
 *
 * @f: QEMUFile where to send the data
 * @opaque: RAMState pointer
 */
static int ram_save_complete(QEMUFile *f, void *opaque)
{
    RAMState **temp = opaque;
    RAMState *rs = *temp;
    int ret = 0;

    rs->last_stage = !migration_in_colo_state();

    WITH_RCU_READ_LOCK_GUARD() {
        if (!migration_in_postcopy()) {
            migration_bitmap_sync_precopy(rs, true);
        }

        ret = rdma_registration_start(f, RAM_CONTROL_FINISH);
        if (ret < 0) {
            qemu_file_set_error(f, ret);
            return ret;
        }

        /* try transferring iterative blocks of memory */

        /* flush all remaining blocks regardless of rate limiting */
        qemu_mutex_lock(&rs->bitmap_mutex);
        while (true) {
            int pages;

            pages = ram_find_and_save_block(rs);
            /* no more blocks to send */
            if (pages == 0) {
                break;
            }
            if (pages < 0) {
                qemu_mutex_unlock(&rs->bitmap_mutex);
                return pages;
            }
        }
        qemu_mutex_unlock(&rs->bitmap_mutex);

        compress_flush_data();

        ret = rdma_registration_stop(f, RAM_CONTROL_FINISH);
        if (ret < 0) {
            qemu_file_set_error(f, ret);
            return ret;
        }
    }

    ret = multifd_send_sync_main();
    if (ret < 0) {
        return ret;
    }

    if (migrate_mapped_ram()) {
        ram_save_file_bmap(f);

        if (qemu_file_get_error(f)) {
            Error *local_err = NULL;
            int err = qemu_file_get_error_obj(f, &local_err);

            error_reportf_err(local_err, "Failed to write bitmap to file: ");
            return -err;
        }
    }

    if (migrate_multifd() && !migrate_multifd_flush_after_each_section() &&
        !migrate_mapped_ram()) {
        qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
    }
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    return qemu_fflush(f);
}

static void ram_state_pending_estimate(void *opaque, uint64_t *must_precopy,
                                       uint64_t *can_postcopy)
{
    RAMState **temp = opaque;
    RAMState *rs = *temp;

    uint64_t remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;

    if (migrate_postcopy_ram()) {
        /* We can do postcopy, and all the data is postcopiable */
        *can_postcopy += remaining_size;
    } else {
        *must_precopy += remaining_size;
    }
}

static void ram_state_pending_exact(void *opaque, uint64_t *must_precopy,
                                    uint64_t *can_postcopy)
{
    RAMState **temp = opaque;
    RAMState *rs = *temp;
    uint64_t remaining_size;

    if (!migration_in_postcopy()) {
        WITH_RCU_READ_LOCK_GUARD() {
            migration_bitmap_sync_precopy(rs, false);
        }
    }

    remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;

    if (migrate_postcopy_ram()) {
        /* We can do postcopy, and all the data is postcopiable */
        *can_postcopy += remaining_size;
    } else {
        *must_precopy += remaining_size;
    }
}

static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    unsigned int xh_len;
    int xh_flags;
    uint8_t *loaded_data;

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        error_report("Failed to load XBZRLE page - wrong compression!");
        return -1;
    }

    if (xh_len > TARGET_PAGE_SIZE) {
        error_report("Failed to load XBZRLE page - len overflow!");
        return -1;
    }
    loaded_data = XBZRLE.decoded_buf;
    /* load data and decode */
    /* it can change loaded_data to point to an internal buffer */
    qemu_get_buffer_in_place(f, &loaded_data, xh_len);

    /* decode RLE */
    if (xbzrle_decode_buffer(loaded_data, xh_len, host,
                             TARGET_PAGE_SIZE) == -1) {
        error_report("Failed to load XBZRLE page - decode error!");
        return -1;
    }

    return 0;
}

/**
 * ram_block_from_stream: read a RAMBlock id from the migration stream
 *
 * Must be called from within a rcu critical section.
 *
 * Returns a pointer from within the RCU-protected ram_list.
 *
 * @mis: the migration incoming state pointer
 * @f: QEMUFile where to read the data from
 * @flags: Page flags (mostly to see if it's a continuation of previous block)
 * @channel: the channel we're using
 */
static inline RAMBlock *ram_block_from_stream(MigrationIncomingState *mis,
                                              QEMUFile *f, int flags,
                                              int channel)
{
    RAMBlock *block = mis->last_recv_block[channel];
    char id[256];
    uint8_t len;

    if (flags & RAM_SAVE_FLAG_CONTINUE) {
        if (!block) {
            error_report("Ack, bad migration stream!");
            return NULL;
        }
        return block;
    }

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);
    id[len] = 0;

    block = qemu_ram_block_by_name(id);
    if (!block) {
        error_report("Can't find block %s", id);
        return NULL;
    }

    if (migrate_ram_is_ignored(block)) {
        error_report("block %s should not be migrated !", id);
        return NULL;
    }

    mis->last_recv_block[channel] = block;

    return block;
}

static inline void *host_from_ram_block_offset(RAMBlock *block,
                                               ram_addr_t offset)
{
    if (!offset_in_ramblock(block, offset)) {
        return NULL;
    }

    return block->host + offset;
}

static void *host_page_from_ram_block_offset(RAMBlock *block,
                                             ram_addr_t offset)
{
    /* Note: Explicitly no check against offset_in_ramblock(). */
    return (void *)QEMU_ALIGN_DOWN((uintptr_t)(block->host + offset),
                                   block->page_size);
}

static ram_addr_t host_page_offset_from_ram_block_offset(RAMBlock *block,
                                                         ram_addr_t offset)
{
    return ((uintptr_t)block->host + offset) & (block->page_size - 1);
}

void colo_record_bitmap(RAMBlock *block, ram_addr_t *normal, uint32_t pages)
{
    qemu_mutex_lock(&ram_state->bitmap_mutex);
    for (int i = 0; i < pages; i++) {
        ram_addr_t offset = normal[i];
        ram_state->migration_dirty_pages += !test_and_set_bit(
                                                offset >> TARGET_PAGE_BITS,
                                                block->bmap);
    }
    qemu_mutex_unlock(&ram_state->bitmap_mutex);
}

static inline void *colo_cache_from_block_offset(RAMBlock *block,
                             ram_addr_t offset, bool record_bitmap)
{
    if (!offset_in_ramblock(block, offset)) {
        return NULL;
    }
    if (!block->colo_cache) {
        error_report("%s: colo_cache is NULL in block :%s",
                     __func__, block->idstr);
        return NULL;
    }

    /*
     * During colo checkpoint, we need a bitmap of these migrated pages.
     * It helps us to decide which pages in ram cache should be flushed
     * into VM's RAM later.
     */
    if (record_bitmap) {
        colo_record_bitmap(block, &offset, 1);
    }
    return block->colo_cache + offset;
}

/**
 * ram_handle_zero: handle the zero page case
 *
 * If a page (or a whole RDMA chunk) has been
 * determined to be zero, then zap it.
 *
 * @host: host address for the zero page
 * @size: size of the zero page
 */
void ram_handle_zero(void *host, uint64_t size)
{
    if (!buffer_is_zero(host, size)) {
        memset(host, 0, size);
    }
}

3603 static void colo_init_ram_state(void)
3605 ram_state_init(&ram_state
);
3609 * colo cache: this is for secondary VM, we cache the whole
3610 * memory of the secondary VM, it is need to hold the global lock
3611 * to call this helper.
3613 int colo_init_ram_cache(void)
3617 WITH_RCU_READ_LOCK_GUARD() {
3618 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
3619 block
->colo_cache
= qemu_anon_ram_alloc(block
->used_length
,
3620 NULL
, false, false);
3621 if (!block
->colo_cache
) {
3622 error_report("%s: Can't alloc memory for COLO cache of block %s,"
3623 "size 0x" RAM_ADDR_FMT
, __func__
, block
->idstr
,
3624 block
->used_length
);
3625 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
3626 if (block
->colo_cache
) {
3627 qemu_anon_ram_free(block
->colo_cache
, block
->used_length
);
3628 block
->colo_cache
= NULL
;
3633 if (!machine_dump_guest_core(current_machine
)) {
3634 qemu_madvise(block
->colo_cache
, block
->used_length
,
3635 QEMU_MADV_DONTDUMP
);
3641 * Record the dirty pages that sent by PVM, we use this dirty bitmap together
3642 * with to decide which page in cache should be flushed into SVM's RAM. Here
3643 * we use the same name 'ram_bitmap' as for migration.
3645 if (ram_bytes_total()) {
3646 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
3647 unsigned long pages
= block
->max_length
>> TARGET_PAGE_BITS
;
3648 block
->bmap
= bitmap_new(pages
);
3652 colo_init_ram_state();
3656 /* TODO: duplicated with ram_init_bitmaps */
3657 void colo_incoming_start_dirty_log(void)
3659 RAMBlock
*block
= NULL
;
3660 /* For memory_global_dirty_log_start below. */
3662 qemu_mutex_lock_ramlist();
3664 memory_global_dirty_log_sync(false);
3665 WITH_RCU_READ_LOCK_GUARD() {
3666 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
3667 ramblock_sync_dirty_bitmap(ram_state
, block
);
3668 /* Discard this dirty bitmap record */
3669 bitmap_zero(block
->bmap
, block
->max_length
>> TARGET_PAGE_BITS
);
3671 memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION
);
3673 ram_state
->migration_dirty_pages
= 0;
3674 qemu_mutex_unlock_ramlist();
3678 /* It is need to hold the global lock to call this helper */
3679 void colo_release_ram_cache(void)
3683 memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION
);
3684 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
3685 g_free(block
->bmap
);
3689 WITH_RCU_READ_LOCK_GUARD() {
3690 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
3691 if (block
->colo_cache
) {
3692 qemu_anon_ram_free(block
->colo_cache
, block
->used_length
);
3693 block
->colo_cache
= NULL
;
3697 ram_state_cleanup(&ram_state
);
3701 * ram_load_setup: Setup RAM for migration incoming side
3703 * Returns zero to indicate success and negative for error
3705 * @f: QEMUFile where to receive the data
3706 * @opaque: RAMState pointer
3707 * @errp: pointer to Error*, to store an error if it happens.
3709 static int ram_load_setup(QEMUFile
*f
, void *opaque
, Error
**errp
)
3711 xbzrle_load_setup();
3712 ramblock_recv_map_init();
3717 static int ram_load_cleanup(void *opaque
)
3721 RAMBLOCK_FOREACH_NOT_IGNORED(rb
) {
3722 qemu_ram_block_writeback(rb
);
3725 xbzrle_load_cleanup();
3727 RAMBLOCK_FOREACH_NOT_IGNORED(rb
) {
3728 g_free(rb
->receivedmap
);
3729 rb
->receivedmap
= NULL
;
/**
 * ram_postcopy_incoming_init: allocate postcopy data structures
 *
 * Returns 0 for success and negative if there was one error
 *
 * @mis: current migration incoming state
 *
 * Allocate data structures etc needed by incoming migration with
 * postcopy-ram. postcopy-ram's similarly named
 * postcopy_ram_incoming_init does the work.
 */
int ram_postcopy_incoming_init(MigrationIncomingState *mis)
{
    return postcopy_ram_incoming_init(mis);
}

3752 * ram_load_postcopy: load a page in postcopy case
3754 * Returns 0 for success or -errno in case of error
3756 * Called in postcopy mode by ram_load().
3757 * rcu_read_lock is taken prior to this being called.
3759 * @f: QEMUFile where to send the data
3760 * @channel: the channel to use for loading
3762 int ram_load_postcopy(QEMUFile
*f
, int channel
)
3764 int flags
= 0, ret
= 0;
3765 bool place_needed
= false;
3766 bool matches_target_page_size
= false;
3767 MigrationIncomingState
*mis
= migration_incoming_get_current();
3768 PostcopyTmpPage
*tmp_page
= &mis
->postcopy_tmp_pages
[channel
];
3770 while (!ret
&& !(flags
& RAM_SAVE_FLAG_EOS
)) {
3772 void *page_buffer
= NULL
;
3773 void *place_source
= NULL
;
3774 RAMBlock
*block
= NULL
;
3778 addr
= qemu_get_be64(f
);
3781 * If qemu file error, we should stop here, and then "addr"
3784 ret
= qemu_file_get_error(f
);
3789 flags
= addr
& ~TARGET_PAGE_MASK
;
3790 addr
&= TARGET_PAGE_MASK
;
3792 trace_ram_load_postcopy_loop(channel
, (uint64_t)addr
, flags
);
3793 if (flags
& (RAM_SAVE_FLAG_ZERO
| RAM_SAVE_FLAG_PAGE
|
3794 RAM_SAVE_FLAG_COMPRESS_PAGE
)) {
3795 block
= ram_block_from_stream(mis
, f
, flags
, channel
);
3802 * Relying on used_length is racy and can result in false positives.
3803 * We might place pages beyond used_length in case RAM was shrunk
3804 * while in postcopy, which is fine - trying to place via
3805 * UFFDIO_COPY/UFFDIO_ZEROPAGE will never segfault.
3807 if (!block
->host
|| addr
>= block
->postcopy_length
) {
3808 error_report("Illegal RAM offset " RAM_ADDR_FMT
, addr
);
3812 tmp_page
->target_pages
++;
3813 matches_target_page_size
= block
->page_size
== TARGET_PAGE_SIZE
;
3815 * Postcopy requires that we place whole host pages atomically;
3816 * these may be huge pages for RAMBlocks that are backed by
3818 * To make it atomic, the data is read into a temporary page
3819 * that's moved into place later.
3820 * The migration protocol uses, possibly smaller, target-pages
3821 * however the source ensures it always sends all the components
3822 * of a host page in one chunk.
3824 page_buffer
= tmp_page
->tmp_huge_page
+
3825 host_page_offset_from_ram_block_offset(block
, addr
);
3826 /* If all TP are zero then we can optimise the place */
3827 if (tmp_page
->target_pages
== 1) {
3828 tmp_page
->host_addr
=
3829 host_page_from_ram_block_offset(block
, addr
);
3830 } else if (tmp_page
->host_addr
!=
3831 host_page_from_ram_block_offset(block
, addr
)) {
3832 /* not the 1st TP within the HP */
3833 error_report("Non-same host page detected on channel %d: "
3834 "Target host page %p, received host page %p "
3835 "(rb %s offset 0x"RAM_ADDR_FMT
" target_pages %d)",
3836 channel
, tmp_page
->host_addr
,
3837 host_page_from_ram_block_offset(block
, addr
),
3838 block
->idstr
, addr
, tmp_page
->target_pages
);
3844 * If it's the last part of a host page then we place the host
3847 if (tmp_page
->target_pages
==
3848 (block
->page_size
/ TARGET_PAGE_SIZE
)) {
3849 place_needed
= true;
3851 place_source
= tmp_page
->tmp_huge_page
;
3854 switch (flags
& ~RAM_SAVE_FLAG_CONTINUE
) {
3855 case RAM_SAVE_FLAG_ZERO
:
3856 ch
= qemu_get_byte(f
);
3858 error_report("Found a zero page with value %d", ch
);
3863 * Can skip to set page_buffer when
3864 * this is a zero page and (block->page_size == TARGET_PAGE_SIZE).
3866 if (!matches_target_page_size
) {
3867 memset(page_buffer
, ch
, TARGET_PAGE_SIZE
);
3871 case RAM_SAVE_FLAG_PAGE
:
3872 tmp_page
->all_zero
= false;
3873 if (!matches_target_page_size
) {
3874 /* For huge pages, we always use temporary buffer */
3875 qemu_get_buffer(f
, page_buffer
, TARGET_PAGE_SIZE
);
3878 * For small pages that matches target page size, we
3879 * avoid the qemu_file copy. Instead we directly use
3880 * the buffer of QEMUFile to place the page. Note: we
3881 * cannot do any QEMUFile operation before using that
3882 * buffer to make sure the buffer is valid when
3885 qemu_get_buffer_in_place(f
, (uint8_t **)&place_source
,
3889 case RAM_SAVE_FLAG_COMPRESS_PAGE
:
3890 tmp_page
->all_zero
= false;
3891 len
= qemu_get_be32(f
);
3892 if (len
< 0 || len
> compressBound(TARGET_PAGE_SIZE
)) {
3893 error_report("Invalid compressed data length: %d", len
);
3897 decompress_data_with_multi_threads(f
, page_buffer
, len
);
3899 case RAM_SAVE_FLAG_MULTIFD_FLUSH
:
3900 multifd_recv_sync_main();
3902 case RAM_SAVE_FLAG_EOS
:
3904 if (migrate_multifd() &&
3905 migrate_multifd_flush_after_each_section()) {
3906 multifd_recv_sync_main();
3910 error_report("Unknown combination of migration flags: 0x%x"
3911 " (postcopy mode)", flags
);
3916 /* Got the whole host page, wait for decompress before placing. */
3918 ret
|= wait_for_decompress_done();
3921 /* Detect for any possible file errors */
3922 if (!ret
&& qemu_file_get_error(f
)) {
3923 ret
= qemu_file_get_error(f
);
3926 if (!ret
&& place_needed
) {
3927 if (tmp_page
->all_zero
) {
3928 ret
= postcopy_place_page_zero(mis
, tmp_page
->host_addr
, block
);
3930 ret
= postcopy_place_page(mis
, tmp_page
->host_addr
,
3931 place_source
, block
);
3933 place_needed
= false;
3934 postcopy_temp_page_reset(tmp_page
);
static bool postcopy_is_running(void)
{
    PostcopyState ps = postcopy_state_get();
    return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
}

3948 * Flush content of RAM cache into SVM's memory.
3949 * Only flush the pages that be dirtied by PVM or SVM or both.
3951 void colo_flush_ram_cache(void)
3953 RAMBlock
*block
= NULL
;
3956 unsigned long offset
= 0;
3958 memory_global_dirty_log_sync(false);
3959 qemu_mutex_lock(&ram_state
->bitmap_mutex
);
3960 WITH_RCU_READ_LOCK_GUARD() {
3961 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
3962 ramblock_sync_dirty_bitmap(ram_state
, block
);
3966 trace_colo_flush_ram_cache_begin(ram_state
->migration_dirty_pages
);
3967 WITH_RCU_READ_LOCK_GUARD() {
3968 block
= QLIST_FIRST_RCU(&ram_list
.blocks
);
3971 unsigned long num
= 0;
3973 offset
= colo_bitmap_find_dirty(ram_state
, block
, offset
, &num
);
3974 if (!offset_in_ramblock(block
,
3975 ((ram_addr_t
)offset
) << TARGET_PAGE_BITS
)) {
3978 block
= QLIST_NEXT_RCU(block
, next
);
3980 unsigned long i
= 0;
3982 for (i
= 0; i
< num
; i
++) {
3983 migration_bitmap_clear_dirty(ram_state
, block
, offset
+ i
);
3985 dst_host
= block
->host
3986 + (((ram_addr_t
)offset
) << TARGET_PAGE_BITS
);
3987 src_host
= block
->colo_cache
3988 + (((ram_addr_t
)offset
) << TARGET_PAGE_BITS
);
3989 memcpy(dst_host
, src_host
, TARGET_PAGE_SIZE
* num
);
3994 qemu_mutex_unlock(&ram_state
->bitmap_mutex
);
3995 trace_colo_flush_ram_cache_end();
3998 static size_t ram_load_multifd_pages(void *host_addr
, size_t size
,
4001 MultiFDRecvData
*data
= multifd_get_recv_data();
4003 data
->opaque
= host_addr
;
4004 data
->file_offset
= offset
;
4007 if (!multifd_recv()) {
4014 static bool read_ramblock_mapped_ram(QEMUFile
*f
, RAMBlock
*block
,
4015 long num_pages
, unsigned long *bitmap
,
4019 unsigned long set_bit_idx
, clear_bit_idx
;
4022 size_t read
, unread
, size
;
4024 for (set_bit_idx
= find_first_bit(bitmap
, num_pages
);
4025 set_bit_idx
< num_pages
;
4026 set_bit_idx
= find_next_bit(bitmap
, num_pages
, clear_bit_idx
+ 1)) {
4028 clear_bit_idx
= find_next_zero_bit(bitmap
, num_pages
, set_bit_idx
+ 1);
4030 unread
= TARGET_PAGE_SIZE
* (clear_bit_idx
- set_bit_idx
);
4031 offset
= set_bit_idx
<< TARGET_PAGE_BITS
;
4033 while (unread
> 0) {
4034 host
= host_from_ram_block_offset(block
, offset
);
4036 error_setg(errp
, "page outside of ramblock %s range",
4041 size
= MIN(unread
, MAPPED_RAM_LOAD_BUF_SIZE
);
4043 if (migrate_multifd()) {
4044 read
= ram_load_multifd_pages(host
, size
,
4045 block
->pages_offset
+ offset
);
4047 read
= qemu_get_buffer_at(f
, host
, size
,
4048 block
->pages_offset
+ offset
);
4062 qemu_file_get_error_obj(f
, errp
);
4063 error_prepend(errp
, "(%s) failed to read page " RAM_ADDR_FMT
4064 "from file offset %" PRIx64
": ", block
->idstr
, offset
,
4065 block
->pages_offset
+ offset
);
4069 static void parse_ramblock_mapped_ram(QEMUFile
*f
, RAMBlock
*block
,
4070 ram_addr_t length
, Error
**errp
)
4072 g_autofree
unsigned long *bitmap
= NULL
;
4073 MappedRamHeader header
;
4077 if (!mapped_ram_read_header(f
, &header
, errp
)) {
4081 block
->pages_offset
= header
.pages_offset
;
4084 * Check the alignment of the file region that contains pages. We
4085 * don't enforce MAPPED_RAM_FILE_OFFSET_ALIGNMENT to allow that
4086 * value to change in the future. Do only a sanity check with page
4089 if (!QEMU_IS_ALIGNED(block
->pages_offset
, TARGET_PAGE_SIZE
)) {
4091 "Error reading ramblock %s pages, region has bad alignment",
4096 num_pages
= length
/ header
.page_size
;
4097 bitmap_size
= BITS_TO_LONGS(num_pages
) * sizeof(unsigned long);
4099 bitmap
= g_malloc0(bitmap_size
);
4100 if (qemu_get_buffer_at(f
, (uint8_t *)bitmap
, bitmap_size
,
4101 header
.bitmap_offset
) != bitmap_size
) {
4102 error_setg(errp
, "Error reading dirty bitmap");
4106 if (!read_ramblock_mapped_ram(f
, block
, num_pages
, bitmap
, errp
)) {
4110 /* Skip pages array */
4111 qemu_set_offset(f
, block
->pages_offset
+ length
, SEEK_SET
);
4116 static int parse_ramblock(QEMUFile
*f
, RAMBlock
*block
, ram_addr_t length
)
4119 /* ADVISE is earlier, it shows the source has the postcopy capability on */
4120 bool postcopy_advised
= migration_incoming_postcopy_advised();
4121 int max_hg_page_size
;
4122 Error
*local_err
= NULL
;
4126 if (migrate_mapped_ram()) {
4127 parse_ramblock_mapped_ram(f
, block
, length
, &local_err
);
4129 error_report_err(local_err
);
4135 if (!qemu_ram_is_migratable(block
)) {
4136 error_report("block %s should not be migrated !", block
->idstr
);
4140 if (length
!= block
->used_length
) {
4141 ret
= qemu_ram_resize(block
, length
, &local_err
);
4143 error_report_err(local_err
);
4149 * ??? Mirrors the previous value of qemu_host_page_size,
4150 * but is this really what was intended for the migration?
4152 max_hg_page_size
= MAX(qemu_real_host_page_size(), TARGET_PAGE_SIZE
);
4154 /* For postcopy we need to check hugepage sizes match */
4155 if (postcopy_advised
&& migrate_postcopy_ram() &&
4156 block
->page_size
!= max_hg_page_size
) {
4157 uint64_t remote_page_size
= qemu_get_be64(f
);
4158 if (remote_page_size
!= block
->page_size
) {
4159 error_report("Mismatched RAM page size %s "
4160 "(local) %zd != %" PRId64
, block
->idstr
,
4161 block
->page_size
, remote_page_size
);
4165 if (migrate_ignore_shared()) {
4166 hwaddr addr
= qemu_get_be64(f
);
4167 if (migrate_ram_is_ignored(block
) &&
4168 block
->mr
->addr
!= addr
) {
4169 error_report("Mismatched GPAs for block %s "
4170 "%" PRId64
"!= %" PRId64
, block
->idstr
,
4171 (uint64_t)addr
, (uint64_t)block
->mr
->addr
);
4175 ret
= rdma_block_notification_handle(f
, block
->idstr
);
4177 qemu_file_set_error(f
, ret
);
4183 static int parse_ramblocks(QEMUFile
*f
, ram_addr_t total_ram_bytes
)
4187 /* Synchronize RAM block list */
4188 while (!ret
&& total_ram_bytes
) {
4192 int len
= qemu_get_byte(f
);
4194 qemu_get_buffer(f
, (uint8_t *)id
, len
);
4196 length
= qemu_get_be64(f
);
4198 block
= qemu_ram_block_by_name(id
);
4200 ret
= parse_ramblock(f
, block
, length
);
4202 error_report("Unknown ramblock \"%s\", cannot accept "
4206 total_ram_bytes
-= length
;
4213 * ram_load_precopy: load pages in precopy case
4215 * Returns 0 for success or -errno in case of error
4217 * Called in precopy mode by ram_load().
4218 * rcu_read_lock is taken prior to this being called.
4220 * @f: QEMUFile where to send the data
4222 static int ram_load_precopy(QEMUFile
*f
)
4224 MigrationIncomingState
*mis
= migration_incoming_get_current();
4225 int flags
= 0, ret
= 0, invalid_flags
= 0, len
= 0, i
= 0;
4227 if (!migrate_compress()) {
4228 invalid_flags
|= RAM_SAVE_FLAG_COMPRESS_PAGE
;
4231 if (migrate_mapped_ram()) {
4232 invalid_flags
|= (RAM_SAVE_FLAG_HOOK
| RAM_SAVE_FLAG_MULTIFD_FLUSH
|
4233 RAM_SAVE_FLAG_PAGE
| RAM_SAVE_FLAG_XBZRLE
|
4234 RAM_SAVE_FLAG_ZERO
);
4237 while (!ret
&& !(flags
& RAM_SAVE_FLAG_EOS
)) {
4239 void *host
= NULL
, *host_bak
= NULL
;
4243 * Yield periodically to let main loop run, but an iteration of
4244 * the main loop is expensive, so do it each some iterations
4246 if ((i
& 32767) == 0 && qemu_in_coroutine()) {
4247 aio_co_schedule(qemu_get_current_aio_context(),
4248 qemu_coroutine_self());
4249 qemu_coroutine_yield();
4253 addr
= qemu_get_be64(f
);
4254 ret
= qemu_file_get_error(f
);
4256 error_report("Getting RAM address failed");
4260 flags
= addr
& ~TARGET_PAGE_MASK
;
4261 addr
&= TARGET_PAGE_MASK
;
4263 if (flags
& invalid_flags
) {
4264 error_report("Unexpected RAM flags: %d", flags
& invalid_flags
);
4266 if (flags
& invalid_flags
& RAM_SAVE_FLAG_COMPRESS_PAGE
) {
4267 error_report("Received an unexpected compressed page");
4274 if (flags
& (RAM_SAVE_FLAG_ZERO
| RAM_SAVE_FLAG_PAGE
|
4275 RAM_SAVE_FLAG_COMPRESS_PAGE
| RAM_SAVE_FLAG_XBZRLE
)) {
4276 RAMBlock
*block
= ram_block_from_stream(mis
, f
, flags
,
4277 RAM_CHANNEL_PRECOPY
);
4279 host
= host_from_ram_block_offset(block
, addr
);
4281 * After going into COLO stage, we should not load the page
4282 * into SVM's memory directly, we put them into colo_cache firstly.
4283 * NOTE: We need to keep a copy of SVM's ram in colo_cache.
4284 * Previously, we copied all these memory in preparing stage of COLO
4285 * while we need to stop VM, which is a time-consuming process.
4286 * Here we optimize it by a trick, back-up every page while in
4287 * migration process while COLO is enabled, though it affects the
4288 * speed of the migration, but it obviously reduce the downtime of
4289 * back-up all SVM'S memory in COLO preparing stage.
4291 if (migration_incoming_colo_enabled()) {
4292 if (migration_incoming_in_colo_state()) {
4293 /* In COLO stage, put all pages into cache temporarily */
4294 host
= colo_cache_from_block_offset(block
, addr
, true);
4297 * In migration stage but before COLO stage,
4298 * Put all pages into both cache and SVM's memory.
4300 host_bak
= colo_cache_from_block_offset(block
, addr
, false);
4304 error_report("Illegal RAM offset " RAM_ADDR_FMT
, addr
);
4308 if (!migration_incoming_in_colo_state()) {
4309 ramblock_recv_bitmap_set(block
, host
);
4312 trace_ram_load_loop(block
->idstr
, (uint64_t)addr
, flags
, host
);
4315 switch (flags
& ~RAM_SAVE_FLAG_CONTINUE
) {
4316 case RAM_SAVE_FLAG_MEM_SIZE
:
4317 ret
= parse_ramblocks(f
, addr
);
4319 * For mapped-ram migration (to a file) using multifd, we sync
4320 * once and for all here to make sure all tasks we queued to
4321 * multifd threads are completed, so that all the ramblocks
4322 * (including all the guest memory pages within) are fully
4323 * loaded after this sync returns.
4325 if (migrate_mapped_ram()) {
4326 multifd_recv_sync_main();
4330 case RAM_SAVE_FLAG_ZERO
:
4331 ch
= qemu_get_byte(f
);
4333 error_report("Found a zero page with value %d", ch
);
4337 ram_handle_zero(host
, TARGET_PAGE_SIZE
);
4340 case RAM_SAVE_FLAG_PAGE
:
4341 qemu_get_buffer(f
, host
, TARGET_PAGE_SIZE
);
4344 case RAM_SAVE_FLAG_COMPRESS_PAGE
:
4345 len
= qemu_get_be32(f
);
4346 if (len
< 0 || len
> compressBound(TARGET_PAGE_SIZE
)) {
4347 error_report("Invalid compressed data length: %d", len
);
4351 decompress_data_with_multi_threads(f
, host
, len
);
4354 case RAM_SAVE_FLAG_XBZRLE
:
4355 if (load_xbzrle(f
, addr
, host
) < 0) {
4356 error_report("Failed to decompress XBZRLE page at "
4357 RAM_ADDR_FMT
, addr
);
4362 case RAM_SAVE_FLAG_MULTIFD_FLUSH
:
4363 multifd_recv_sync_main();
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            if (migrate_multifd() &&
                migrate_multifd_flush_after_each_section() &&
                /*
                 * Mapped-ram migration flushes once and for all after
                 * parsing ramblocks. Always ignore EOS for it.
                 */
                !migrate_mapped_ram()) {
                multifd_recv_sync_main();
            }
            break;
        case RAM_SAVE_FLAG_HOOK:
            ret = rdma_registration_handle(f);
            if (ret < 0) {
                qemu_file_set_error(f, ret);
            }
            break;
        default:
            error_report("Unknown combination of migration flags: 0x%x", flags);
            ret = -EINVAL;
        }
        if (!ret) {
            ret = qemu_file_get_error(f);
        }
        if (!ret && host_bak) {
            memcpy(host_bak, host, TARGET_PAGE_SIZE);
        }
    }

    ret |= wait_for_decompress_done();
    return ret;
}
static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    int ret = 0;
    static uint64_t seq_iter;
    /*
     * If the system is running in postcopy mode, page inserts to host memory
     * must be atomic.
     */
    bool postcopy_running = postcopy_is_running();

    seq_iter++;

    if (version_id != 4) {
        return -EINVAL;
    }

    /*
     * This RCU critical section can be very long running.
     * When RCU reclaims in the code start to become numerous,
     * it will be necessary to reduce the granularity of this
     * critical section.
     */
    WITH_RCU_READ_LOCK_GUARD() {
        if (postcopy_running) {
            /*
             * Note! Here RAM_CHANNEL_PRECOPY is the precopy channel of
             * postcopy migration; we have another RAM_CHANNEL_POSTCOPY to
             * service fast page faults.
             */
            ret = ram_load_postcopy(f, RAM_CHANNEL_PRECOPY);
        } else {
            ret = ram_load_precopy(f);
        }
    }
    trace_ram_load_complete(ret, seq_iter);

    return ret;
}
static bool ram_has_postcopy(void *opaque)
{
    RAMBlock *rb;

    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        if (ramblock_is_pmem(rb)) {
            info_report("Block: %s, host: %p is nvdimm memory, postcopy "
                        "is not supported now!", rb->idstr, rb->host);
            return false;
        }
    }

    return migrate_postcopy_ram();
}
/* Sync all the dirty bitmaps with the destination VM */
static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
{
    RAMBlock *block;
    QEMUFile *file = s->to_dst_file;

    trace_ram_dirty_bitmap_sync_start();

    qatomic_set(&rs->postcopy_bmap_sync_requested, 0);
    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        qemu_savevm_send_recv_bitmap(file, block->idstr);
        trace_ram_dirty_bitmap_request(block->idstr);
        qatomic_inc(&rs->postcopy_bmap_sync_requested);
    }

    trace_ram_dirty_bitmap_sync_wait();

    /* Wait until all the ramblocks' dirty bitmaps are synced */
    while (qatomic_read(&rs->postcopy_bmap_sync_requested)) {
        if (migration_rp_wait(s)) {
            return -1;
        }
    }

    trace_ram_dirty_bitmap_sync_complete();

    return 0;
}
/*
 * Read the received bitmap, revert it as the initial dirty bitmap.
 * This is only used when the postcopy migration is paused but wants
 * to resume from a middle point.
 *
 * Returns true if succeeded, false for errors.
 */
bool ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block, Error **errp)
{
    /* from_dst_file is always valid because we're within rp_thread */
    QEMUFile *file = s->rp_state.from_dst_file;
    g_autofree unsigned long *le_bitmap = NULL;
    unsigned long nbits = block->used_length >> TARGET_PAGE_BITS;
    uint64_t local_size = DIV_ROUND_UP(nbits, 8);
    uint64_t size, end_mark;
    RAMState *rs = ram_state;

    trace_ram_dirty_bitmap_reload_begin(block->idstr);

    if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
        error_setg(errp, "Reload bitmap in incorrect state %s",
                   MigrationStatus_str(s->state));
        return false;
    }

    /*
     * Note: see comments in ramblock_recv_bitmap_send() on why we
     * need the endianness conversion, and the paddings.
     */
    local_size = ROUND_UP(local_size, 8);

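    /*
     * Note added for clarity: allocate one extra long of slack so that the
     * padded transfer (local_size was rounded up to 8 bytes above) always
     * fits within the bitmap buffer.
     */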
    le_bitmap = bitmap_new(nbits + BITS_PER_LONG);

    size = qemu_get_be64(file);

    /* The size of the bitmap should match with our ramblock */
    if (size != local_size) {
        error_setg(errp, "ramblock '%s' bitmap size mismatch (0x%"PRIx64
                   " != 0x%"PRIx64")", block->idstr, size, local_size);
        return false;
    }

    size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size);
    end_mark = qemu_get_be64(file);

    if (qemu_file_get_error(file) || size != local_size) {
        error_setg(errp, "read bitmap failed for ramblock '%s': "
                   "(size 0x%"PRIx64", got: 0x%"PRIx64")",
                   block->idstr, local_size, size);
        return false;
    }

    if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
        error_setg(errp, "ramblock '%s' end mark incorrect: 0x%"PRIx64,
                   block->idstr, end_mark);
        return false;
    }

    /*
     * Endianness conversion. We are during postcopy (though paused).
     * The dirty bitmap won't change. We can directly modify it.
     */
    bitmap_from_le(block->bmap, le_bitmap, nbits);

    /*
     * What we received is the "received bitmap". Revert it as the initial
     * dirty bitmap for this ramblock.
     */
    bitmap_complement(block->bmap, block->bmap, nbits);

    /* Clear dirty bits of discarded ranges that we don't want to migrate. */
    ramblock_dirty_bitmap_clear_discarded_pages(block);

    /* We'll recalculate migration_dirty_pages in ram_state_resume_prepare(). */
    trace_ram_dirty_bitmap_reload_complete(block->idstr);

    qatomic_dec(&rs->postcopy_bmap_sync_requested);

    /*
     * We have successfully synced the bitmap for the current ramblock.
     * Always kick the migration thread to check whether all requested
     * bitmaps are reloaded. NOTE: it's racy to only kick when
     * requested==0, because we don't know whether the migration thread
     * may still be increasing it.
     */
    migration_rp_kick(s);

    return true;
}
static int ram_resume_prepare(MigrationState *s, void *opaque)
{
    RAMState *rs = *(RAMState **)opaque;
    int ret;

    ret = ram_dirty_bitmap_sync_all(s, rs);
    if (ret) {
        return ret;
    }

    ram_state_resume_prepare(rs, s->to_dst_file);

    return 0;
}

void postcopy_preempt_shutdown_file(MigrationState *s)
{
    qemu_put_be64(s->postcopy_qemufile_src, RAM_SAVE_FLAG_EOS);
    qemu_fflush(s->postcopy_qemufile_src);
}
static SaveVMHandlers savevm_ram_handlers = {
    .save_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete_postcopy = ram_save_complete,
    .save_live_complete_precopy = ram_save_complete,
    .has_postcopy = ram_has_postcopy,
    .state_pending_exact = ram_state_pending_exact,
    .state_pending_estimate = ram_state_pending_estimate,
    .load_state = ram_load,
    .save_cleanup = ram_save_cleanup,
    .load_setup = ram_load_setup,
    .load_cleanup = ram_load_cleanup,
    .resume_prepare = ram_resume_prepare,
};

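/*
 * Note added for clarity: these handlers are registered below in
 * ram_mig_init() under the "ram" section with version 4, which matches the
 * version_id check in ram_load().
 */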
static void ram_mig_ram_block_resized(RAMBlockNotifier *n, void *host,
                                      size_t old_size, size_t new_size)
{
    PostcopyState ps = postcopy_state_get();
    ram_addr_t offset;
    RAMBlock *rb = qemu_ram_block_from_host(host, false, &offset);
    Error *err = NULL;

    if (!rb) {
        error_report("RAM block not found");
        return;
    }

    if (migrate_ram_is_ignored(rb)) {
        return;
    }

    if (!migration_is_idle()) {
        /*
         * Precopy code on the source cannot deal with the size of RAM blocks
         * changing at random points in time - especially after sending the
         * RAM block sizes in the migration stream, they must no longer change.
         * Abort and indicate a proper reason.
         */
        error_setg(&err, "RAM block '%s' resized during precopy.", rb->idstr);
        migration_cancel(err);
    }

    switch (ps) {
    case POSTCOPY_INCOMING_ADVISE:
        /*
         * Update what ram_postcopy_incoming_init()->init_range() does at the
         * time postcopy was advised. Syncing RAM blocks with the source will
         * result in RAM resizes.
         */
        if (old_size < new_size) {
            if (ram_discard_range(rb->idstr, old_size, new_size - old_size)) {
                error_report("RAM block '%s' discard of resized RAM failed",
                             rb->idstr);
            }
        }
        rb->postcopy_length = new_size;
        break;
    case POSTCOPY_INCOMING_NONE:
    case POSTCOPY_INCOMING_RUNNING:
    case POSTCOPY_INCOMING_END:
        /*
         * Once our guest is running, postcopy no longer cares about
         * resizes. When growing, the new memory was not available on the
         * source, so no handler is needed.
         */
        break;
    default:
        error_report("RAM block '%s' resized during postcopy state: %d",
                     rb->idstr, ps);
        exit(-1);
    }
}

static RAMBlockNotifier ram_mig_ram_notifier = {
    .ram_block_resized = ram_mig_ram_block_resized,
};

void ram_mig_init(void)
{
    qemu_mutex_init(&XBZRLE.lock);
    register_savevm_live("ram", 0, 4, &savevm_ram_handlers, &ram_state);
    ram_block_notifier_add(&ram_mig_ram_notifier);
}