/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2011-2015 Red Hat Inc
 *
 * Authors:
 *  Juan Quintela <quintela@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include <zlib.h>
#include "qapi-event.h"
#include "qemu/cutils.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "migration/migration.h"
#include "migration/postcopy-ram.h"
#include "exec/address-spaces.h"
#include "migration/page_cache.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "exec/ram_addr.h"
#include "qemu/rcu_queue.h"
#include "migration/colo.h"
static int dirty_rate_high_cnt;

static uint64_t bitmap_sync_count;
/***********************************************************/
/* ram save/restore */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_COMPRESS 0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in migration.h start with 0x100 next */
#define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100
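
/* These flags travel OR'ed into the low bits of the be64 page offset that
 * save_page_header() emits; offsets are target-page aligned, so the low
 * bits are always free.  For example, a normal page at offset 0x42000 is
 * announced on the wire as 0x42000 | RAM_SAVE_FLAG_PAGE == 0x42008.
 */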
static uint8_t *ZERO_TARGET_PAGE;

static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
    return buffer_is_zero(p, size);
}
/* struct contains XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE, Protected by lock. */
    PageCache *cache;
    QemuMutex lock;
} XBZRLE;

/* buffer used for XBZRLE decoding */
static uint8_t *xbzrle_decoded_buf;
static void XBZRLE_cache_lock(void)
{
    if (migrate_use_xbzrle())
        qemu_mutex_lock(&XBZRLE.lock);
}

static void XBZRLE_cache_unlock(void)
{
    if (migrate_use_xbzrle())
        qemu_mutex_unlock(&XBZRLE.lock);
}
/*
 * called from qmp_migrate_set_cache_size in main thread, possibly while
 * a migration is in progress.
 * A running migration may be using the cache and might finish during this
 * call, hence changes to the cache are protected by XBZRLE.lock().
 */
int64_t xbzrle_cache_resize(int64_t new_size)
{
    PageCache *new_cache;
    int64_t ret;

    if (new_size < TARGET_PAGE_SIZE) {
        return -1;
    }

    XBZRLE_cache_lock();

    if (XBZRLE.cache != NULL) {
        if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
            goto out_new_size;
        }
        new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
                               TARGET_PAGE_SIZE);
        if (!new_cache) {
            error_report("Error creating cache");
            ret = -1;
            goto out;
        }

        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;
    }

out_new_size:
    ret = pow2floor(new_size);
out:
    XBZRLE_cache_unlock();
    return ret;
}
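
/* The requested size is rounded down to a power of two: asking for, say, a
 * 5 MiB XBZRLE cache yields (and reports back to the QMP caller) a 4 MiB
 * cache.
 */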
/* accounting for migration statistics */
typedef struct AccountingInfo {
    uint64_t dup_pages;
    uint64_t skipped_pages;
    uint64_t norm_pages;
    uint64_t iterations;
    uint64_t xbzrle_bytes;
    uint64_t xbzrle_pages;
    uint64_t xbzrle_cache_miss;
    double xbzrle_cache_miss_rate;
    uint64_t xbzrle_overflows;
} AccountingInfo;

static AccountingInfo acct_info;
static void acct_clear(void)
{
    memset(&acct_info, 0, sizeof(acct_info));
}
uint64_t dup_mig_bytes_transferred(void)
{
    return acct_info.dup_pages * TARGET_PAGE_SIZE;
}

uint64_t dup_mig_pages_transferred(void)
{
    return acct_info.dup_pages;
}

uint64_t skipped_mig_bytes_transferred(void)
{
    return acct_info.skipped_pages * TARGET_PAGE_SIZE;
}

uint64_t skipped_mig_pages_transferred(void)
{
    return acct_info.skipped_pages;
}

uint64_t norm_mig_bytes_transferred(void)
{
    return acct_info.norm_pages * TARGET_PAGE_SIZE;
}

uint64_t norm_mig_pages_transferred(void)
{
    return acct_info.norm_pages;
}

uint64_t xbzrle_mig_bytes_transferred(void)
{
    return acct_info.xbzrle_bytes;
}

uint64_t xbzrle_mig_pages_transferred(void)
{
    return acct_info.xbzrle_pages;
}

uint64_t xbzrle_mig_pages_cache_miss(void)
{
    return acct_info.xbzrle_cache_miss;
}

double xbzrle_mig_cache_miss_rate(void)
{
    return acct_info.xbzrle_cache_miss_rate;
}

uint64_t xbzrle_mig_pages_overflow(void)
{
    return acct_info.xbzrle_overflows;
}
/* This is the last block that we have visited searching for dirty pages */
static RAMBlock *last_seen_block;
/* This is the last block from where we have sent data */
static RAMBlock *last_sent_block;
static ram_addr_t last_offset;
static QemuMutex migration_bitmap_mutex;
static uint64_t migration_dirty_pages;
static uint32_t last_version;
static bool ram_bulk_stage;
/* used by the search for pages to send */
struct PageSearchStatus {
    /* Current block being searched */
    RAMBlock    *block;
    /* Current offset to search from */
    ram_addr_t   offset;
    /* Set once we wrap around */
    bool         complete_round;
};
typedef struct PageSearchStatus PageSearchStatus;

static struct BitmapRcu {
    struct rcu_head rcu;
    /* Main migration bitmap */
    unsigned long *bmap;
    /* bitmap of pages that haven't been sent even once
     * only maintained and used in postcopy at the moment
     * where it's used to send the dirtymap at the start
     * of the postcopy phase
     */
    unsigned long *unsentmap;
} *migration_bitmap_rcu;
struct CompressParam {
    bool done;
    bool quit;
    QEMUFile *file;
    QemuMutex mutex;
    QemuCond cond;
    RAMBlock *block;
    ram_addr_t offset;
};
typedef struct CompressParam CompressParam;

struct DecompressParam {
    bool done;
    bool quit;
    QemuMutex mutex;
    QemuCond cond;
    void *des;
    uint8_t *compbuf;
    int len;
};
typedef struct DecompressParam DecompressParam;
static CompressParam *comp_param;
static QemuThread *compress_threads;
/* comp_done_cond is used to wake up the migration thread when
 * one of the compression threads has finished the compression.
 * comp_done_lock is used to co-work with comp_done_cond.
 */
static QemuMutex comp_done_lock;
static QemuCond comp_done_cond;
/* The empty QEMUFileOps will be used by file in CompressParam */
static const QEMUFileOps empty_ops = { };

static bool compression_switch;
static DecompressParam *decomp_param;
static QemuThread *decompress_threads;
static QemuMutex decomp_done_lock;
static QemuCond decomp_done_cond;
static int do_compress_ram_page(QEMUFile *f, RAMBlock *block,
                                ram_addr_t offset);
static void *do_data_compress(void *opaque)
{
    CompressParam *param = opaque;
    RAMBlock *block;
    ram_addr_t offset;

    qemu_mutex_lock(&param->mutex);
    while (!param->quit) {
        if (param->block) {
            block = param->block;
            offset = param->offset;
            param->block = NULL;
            qemu_mutex_unlock(&param->mutex);

            do_compress_ram_page(param->file, block, offset);

            qemu_mutex_lock(&comp_done_lock);
            param->done = true;
            qemu_cond_signal(&comp_done_cond);
            qemu_mutex_unlock(&comp_done_lock);

            qemu_mutex_lock(&param->mutex);
        } else {
            qemu_cond_wait(&param->cond, &param->mutex);
        }
    }
    qemu_mutex_unlock(&param->mutex);

    return NULL;
}
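
/* Hand-off protocol between the migration thread and a compression worker:
 * the migration thread fills param->block/offset under param->mutex and
 * signals param->cond; the worker clears param->block, compresses the page
 * into its private QEMUFile, then sets param->done under comp_done_lock and
 * signals comp_done_cond so the migration thread can collect the result in
 * flush_compressed_data()/compress_page_with_multi_thread().
 */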
static inline void terminate_compression_threads(void)
{
    int idx, thread_count;

    thread_count = migrate_compress_threads();
    for (idx = 0; idx < thread_count; idx++) {
        qemu_mutex_lock(&comp_param[idx].mutex);
        comp_param[idx].quit = true;
        qemu_cond_signal(&comp_param[idx].cond);
        qemu_mutex_unlock(&comp_param[idx].mutex);
    }
}
void migrate_compress_threads_join(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    terminate_compression_threads();
    thread_count = migrate_compress_threads();
    for (i = 0; i < thread_count; i++) {
        qemu_thread_join(compress_threads + i);
        qemu_fclose(comp_param[i].file);
        qemu_mutex_destroy(&comp_param[i].mutex);
        qemu_cond_destroy(&comp_param[i].cond);
    }
    qemu_mutex_destroy(&comp_done_lock);
    qemu_cond_destroy(&comp_done_cond);
    g_free(compress_threads);
    g_free(comp_param);
    compress_threads = NULL;
    comp_param = NULL;
}
void migrate_compress_threads_create(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    compression_switch = true;
    thread_count = migrate_compress_threads();
    compress_threads = g_new0(QemuThread, thread_count);
    comp_param = g_new0(CompressParam, thread_count);
    qemu_cond_init(&comp_done_cond);
    qemu_mutex_init(&comp_done_lock);
    for (i = 0; i < thread_count; i++) {
        /* comp_param[i].file is just used as a dummy buffer to save data,
         * set its ops to empty.
         */
        comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
        comp_param[i].done = true;
        comp_param[i].quit = false;
        qemu_mutex_init(&comp_param[i].mutex);
        qemu_cond_init(&comp_param[i].cond);
        qemu_thread_create(compress_threads + i, "compress",
                           do_data_compress, comp_param + i,
                           QEMU_THREAD_JOINABLE);
    }
}
/**
 * save_page_header: Write page header to wire
 *
 * If this is the 1st block, it also writes the block identification
 *
 * Returns: Number of bytes written
 *
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 *          in the lower bits, it contains flags
 */
static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
{
    size_t size, len;

    qemu_put_be64(f, offset);
    size = 8;

    if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
        len = strlen(block->idstr);
        qemu_put_byte(f, len);
        qemu_put_buffer(f, (uint8_t *)block->idstr, len);
        size += 1 + len;
    }
    return size;
}
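
/* Resulting wire layout for a page of a block not yet announced
 * (no RAM_SAVE_FLAG_CONTINUE), e.g. "pc.ram":
 *
 *   be64: offset | flags
 *   u8:   strlen("pc.ram")
 *   ...   "pc.ram" (idstr bytes, no NUL terminator)
 *
 * followed by the page payload written by the caller.
 */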
/* Reduce amount of guest cpu execution to hopefully slow down memory writes.
 * If guest dirty memory rate is reduced below the rate at which we can
 * transfer pages to the destination then we should be able to complete
 * migration. Some workloads dirty memory way too fast and will not effectively
 * converge, even with auto-converge.
 */
static void mig_throttle_guest_down(void)
{
    MigrationState *s = migrate_get_current();
    uint64_t pct_initial = s->parameters.cpu_throttle_initial;
    uint64_t pct_increment = s->parameters.cpu_throttle_increment;

    /* We have not started throttling yet. Let's start it. */
    if (!cpu_throttle_active()) {
        cpu_throttle_set(pct_initial);
    } else {
        /* Throttling already on, just increase the rate */
        cpu_throttle_set(cpu_throttle_get_percentage() + pct_increment);
    }
}
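
/* With QEMU's default parameters (cpu_throttle_initial=20,
 * cpu_throttle_increment=10) successive calls make the vCPUs sleep for
 * 20%, 30%, 40%, ... of their time until the dirty rate drops below the
 * transfer rate.
 */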
/* Update the xbzrle cache to reflect a page that's been sent as all 0.
 * The important thing is that a stale (not-yet-0'd) page be replaced
 * by the new data.
 * As a bonus, if the page wasn't in the cache it gets added so that
 * when a small write is made into the 0'd page it gets XBZRLE sent
 */
static void xbzrle_cache_zero_page(ram_addr_t current_addr)
{
    if (ram_bulk_stage || !migrate_use_xbzrle()) {
        return;
    }

    /* We don't care if this fails to allocate a new cache page
     * as long as it updated an old one */
    cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE,
                 bitmap_sync_count);
}

#define ENCODING_FLAG_XBZRLE 0x1
/**
 * save_xbzrle_page: compress and send current page
 *
 * Returns: 1 means that we wrote the page
 *          0 means that page is identical to the one already sent
 *          -1 means that xbzrle would be longer than normal
 *
 * @f: QEMUFile where to send the data
 * @current_data: pointer to the page's host data; may be redirected to
 *                the cached copy
 * @current_addr: address of the page in the global ram_addr space
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 */
static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, bool last_stage,
                            uint64_t *bytes_transferred)
{
    int encoded_len = 0, bytes_xbzrle;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr, bitmap_sync_count)) {
        acct_info.xbzrle_cache_miss++;
        if (!last_stage) {
            if (cache_insert(XBZRLE.cache, current_addr, *current_data,
                             bitmap_sync_count) == -1) {
                return -1;
            } else {
                /* update *current_data when the page has been
                   inserted into cache */
                *current_data = get_cached_data(XBZRLE.cache, current_addr);
            }
        }
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        trace_save_xbzrle_page_skipping();
        return 0;
    } else if (encoded_len == -1) {
        trace_save_xbzrle_page_overflow();
        acct_info.xbzrle_overflows++;
        /* update data in the cache */
        if (!last_stage) {
            memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
            *current_data = prev_cached_page;
        }
        return -1;
    }

    /* we need to update the data in the cache, in order to get the same data */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send XBZRLE based compressed page */
    bytes_xbzrle = save_page_header(f, block, offset | RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(f, encoded_len);
    qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
    bytes_xbzrle += encoded_len + 1 + 2;
    acct_info.xbzrle_pages++;
    acct_info.xbzrle_bytes += bytes_xbzrle;
    *bytes_transferred += bytes_xbzrle;

    return 1;
}
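
/* An XBZRLE page costs on the wire:
 *   page header + 1 byte ENCODING_FLAG_XBZRLE + 2 bytes encoded_len
 *   + encoded_len bytes of delta,
 * which is where the "encoded_len + 1 + 2" accounting above comes from.
 */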
/* Called with rcu_read_lock() to protect migration_bitmap
 * rb: The RAMBlock to search for dirty pages in
 * start: Start address (typically so we can continue from previous page)
 * ram_addr_abs: Pointer into which to store the address of the dirty page
 *               within the global ram_addr space
 *
 * Returns: byte offset within memory region of the start of a dirty page
 */
static inline
ram_addr_t migration_bitmap_find_dirty(RAMBlock *rb,
                                       ram_addr_t start,
                                       ram_addr_t *ram_addr_abs)
{
    unsigned long base = rb->offset >> TARGET_PAGE_BITS;
    unsigned long nr = base + (start >> TARGET_PAGE_BITS);
    uint64_t rb_size = rb->used_length;
    unsigned long size = base + (rb_size >> TARGET_PAGE_BITS);
    unsigned long *bitmap;

    unsigned long next;

    bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
    if (ram_bulk_stage && nr > base) {
        next = nr + 1;
    } else {
        next = find_next_bit(bitmap, size, nr);
    }

    *ram_addr_abs = next << TARGET_PAGE_BITS;
    return (next - base) << TARGET_PAGE_BITS;
}
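
/* During the bulk stage every not-yet-visited page is known to be dirty,
 * so the scan above simply advances to the next page (nr + 1) instead of
 * paying for a find_next_bit() over the whole bitmap.
 */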
static inline bool migration_bitmap_clear_dirty(ram_addr_t addr)
{
    bool ret;
    int nr = addr >> TARGET_PAGE_BITS;
    unsigned long *bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;

    ret = test_and_clear_bit(nr, bitmap);

    if (ret) {
        migration_dirty_pages--;
    }
    return ret;
}
static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
{
    unsigned long *bitmap;
    bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
    migration_dirty_pages +=
        cpu_physical_memory_sync_dirty_bitmap(bitmap, start, length);
}
/* Fix me: there are too many global variables used in migration process. */
static int64_t start_time;
static int64_t bytes_xfer_prev;
static int64_t num_dirty_pages_period;
static uint64_t xbzrle_cache_miss_prev;
static uint64_t iterations_prev;

static void migration_bitmap_sync_init(void)
{
    start_time = 0;
    bytes_xfer_prev = 0;
    num_dirty_pages_period = 0;
    xbzrle_cache_miss_prev = 0;
    iterations_prev = 0;
}
static void migration_bitmap_sync(void)
{
    RAMBlock *block;
    uint64_t num_dirty_pages_init = migration_dirty_pages;
    MigrationState *s = migrate_get_current();
    int64_t end_time;
    int64_t bytes_xfer_now;

    bitmap_sync_count++;

    if (!bytes_xfer_prev) {
        bytes_xfer_prev = ram_bytes_transferred();
    }

    if (!start_time) {
        start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    memory_global_dirty_log_sync();

    qemu_mutex_lock(&migration_bitmap_mutex);
    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        migration_bitmap_sync_range(block->offset, block->used_length);
    }
    rcu_read_unlock();
    qemu_mutex_unlock(&migration_bitmap_mutex);

    trace_migration_bitmap_sync_end(migration_dirty_pages
                                    - num_dirty_pages_init);
    num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > start_time + 1000) {
        if (migrate_auto_converge()) {
            /* The following detection logic can be refined later. For now:
               Check to see if the dirtied bytes is 50% more than the approx.
               amount of bytes that just got transferred since the last time we
               were in this routine. If that happens twice, start or increase
               throttling */
            bytes_xfer_now = ram_bytes_transferred();

            if (s->dirty_pages_rate &&
                (num_dirty_pages_period * TARGET_PAGE_SIZE >
                 (bytes_xfer_now - bytes_xfer_prev) / 2) &&
                (dirty_rate_high_cnt++ >= 2)) {
                trace_migration_throttle();
                dirty_rate_high_cnt = 0;
                mig_throttle_guest_down();
            }
            bytes_xfer_prev = bytes_xfer_now;
        }

        if (migrate_use_xbzrle()) {
            if (iterations_prev != acct_info.iterations) {
                acct_info.xbzrle_cache_miss_rate =
                    (double)(acct_info.xbzrle_cache_miss -
                             xbzrle_cache_miss_prev) /
                    (acct_info.iterations - iterations_prev);
            }
            iterations_prev = acct_info.iterations;
            xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss;
        }
        s->dirty_pages_rate = num_dirty_pages_period * 1000
            / (end_time - start_time);
        s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
        start_time = end_time;
        num_dirty_pages_period = 0;
    }
    s->dirty_sync_count = bitmap_sync_count;
    if (migrate_use_events()) {
        qapi_event_send_migration_pass(bitmap_sync_count, NULL);
    }
}
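
/* Worked example of the auto-converge trigger above: with 4 KiB target
 * pages, dirtying 150000 pages (~600 MB) during a one-second window in
 * which only ~1 GB was transferred satisfies "dirtied > transferred / 2";
 * once that has held for a few consecutive windows (dirty_rate_high_cnt),
 * mig_throttle_guest_down() kicks in.
 */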
/**
 * save_zero_page: Send the zero page to the stream
 *
 * Returns: Number of pages written.
 *
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @p: pointer to the page
 * @bytes_transferred: increase it with the number of transferred bytes
 */
static int save_zero_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                          uint8_t *p, uint64_t *bytes_transferred)
{
    int pages = -1;

    if (is_zero_range(p, TARGET_PAGE_SIZE)) {
        acct_info.dup_pages++;
        *bytes_transferred += save_page_header(f, block,
                                               offset | RAM_SAVE_FLAG_COMPRESS);
        qemu_put_byte(f, 0);
        *bytes_transferred += 1;
        pages = 1;
    }

    return pages;
}
static void ram_release_pages(MigrationState *ms, const char *block_name,
                              uint64_t offset, int pages)
{
    if (!migrate_release_ram() || !migration_in_postcopy(ms)) {
        return;
    }

    ram_discard_range(NULL, block_name, offset, pages << TARGET_PAGE_BITS);
}
/**
 * ram_save_page: Send the given page to the stream
 *
 * Returns: Number of pages written.
 *          < 0 - error
 *          >=0 - Number of pages written - this might legally be 0
 *                if xbzrle noticed the page was the same.
 *
 * @ms: The current migration state.
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 */
static int ram_save_page(MigrationState *ms, QEMUFile *f, PageSearchStatus *pss,
                         bool last_stage, uint64_t *bytes_transferred)
{
    int pages = -1;
    uint64_t bytes_xmit;
    ram_addr_t current_addr;
    uint8_t *p;
    int ret;
    bool send_async = true;
    RAMBlock *block = pss->block;
    ram_addr_t offset = pss->offset;

    p = block->host + offset;

    /* When in doubt, send the page as normal */
    bytes_xmit = 0;
    ret = ram_control_save_page(f, block->offset,
                                offset, TARGET_PAGE_SIZE, &bytes_xmit);
    if (bytes_xmit) {
        *bytes_transferred += bytes_xmit;
        pages = 1;
    }

    XBZRLE_cache_lock();

    current_addr = block->offset + offset;

    if (block == last_sent_block) {
        offset |= RAM_SAVE_FLAG_CONTINUE;
    }
    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
        if (ret != RAM_SAVE_CONTROL_DELAYED) {
            if (bytes_xmit > 0) {
                acct_info.norm_pages++;
            } else if (bytes_xmit == 0) {
                acct_info.dup_pages++;
            }
        }
    } else {
        pages = save_zero_page(f, block, offset, p, bytes_transferred);
        if (pages > 0) {
            /* Must let xbzrle know, otherwise a previous (now 0'd) cached
             * page would be stale
             */
            xbzrle_cache_zero_page(current_addr);
            ram_release_pages(ms, block->idstr, pss->offset, pages);
        } else if (!ram_bulk_stage &&
                   !migration_in_postcopy(ms) && migrate_use_xbzrle()) {
            pages = save_xbzrle_page(f, &p, current_addr, block,
                                     offset, last_stage, bytes_transferred);
            if (!last_stage) {
                /* Can't send this cached data async, since the cache page
                 * might get updated before it gets to the wire
                 */
                send_async = false;
            }
        }
    }

    /* XBZRLE overflow or normal page */
    if (pages == -1) {
        *bytes_transferred += save_page_header(f, block,
                                               offset | RAM_SAVE_FLAG_PAGE);
        if (send_async) {
            qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE,
                                  migrate_release_ram() &
                                  migration_in_postcopy(ms));
        } else {
            qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
        }
        *bytes_transferred += TARGET_PAGE_SIZE;
        pages = 1;
        acct_info.norm_pages++;
    }

    XBZRLE_cache_unlock();

    return pages;
}
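
/* Page transmission priority in ram_save_page():
 *   1. ram_control_save_page() - offload hook (e.g. RDMA) when supported
 *   2. save_zero_page()        - 1-byte encoding for all-zero pages
 *   3. save_xbzrle_page()      - delta vs. the cache, once out of bulk stage
 *   4. full TARGET_PAGE_SIZE copy, async when the source page is stable
 */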
static int do_compress_ram_page(QEMUFile *f, RAMBlock *block,
                                ram_addr_t offset)
{
    int bytes_sent, blen;
    uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);

    bytes_sent = save_page_header(f, block, offset |
                                  RAM_SAVE_FLAG_COMPRESS_PAGE);
    blen = qemu_put_compression_data(f, p, TARGET_PAGE_SIZE,
                                     migrate_compress_level());
    if (blen < 0) {
        bytes_sent = 0;
        qemu_file_set_error(migrate_get_current()->to_dst_file, blen);
        error_report("compressed data failed!");
    } else {
        bytes_sent += blen;
        ram_release_pages(migrate_get_current(), block->idstr,
                          offset & TARGET_PAGE_MASK, 1);
    }

    return bytes_sent;
}
static uint64_t bytes_transferred;
static void flush_compressed_data(QEMUFile *f)
{
    int idx, len, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    thread_count = migrate_compress_threads();

    qemu_mutex_lock(&comp_done_lock);
    for (idx = 0; idx < thread_count; idx++) {
        while (!comp_param[idx].done) {
            qemu_cond_wait(&comp_done_cond, &comp_done_lock);
        }
    }
    qemu_mutex_unlock(&comp_done_lock);

    for (idx = 0; idx < thread_count; idx++) {
        qemu_mutex_lock(&comp_param[idx].mutex);
        if (!comp_param[idx].quit) {
            len = qemu_put_qemu_file(f, comp_param[idx].file);
            bytes_transferred += len;
        }
        qemu_mutex_unlock(&comp_param[idx].mutex);
    }
}
static inline void set_compress_params(CompressParam *param, RAMBlock *block,
                                       ram_addr_t offset)
{
    param->block = block;
    param->offset = offset;
}
static int compress_page_with_multi_thread(QEMUFile *f, RAMBlock *block,
                                           ram_addr_t offset,
                                           uint64_t *bytes_transferred)
{
    int idx, thread_count, bytes_xmit = -1, pages = -1;

    thread_count = migrate_compress_threads();
    qemu_mutex_lock(&comp_done_lock);
    while (true) {
        for (idx = 0; idx < thread_count; idx++) {
            if (comp_param[idx].done) {
                comp_param[idx].done = false;
                bytes_xmit = qemu_put_qemu_file(f, comp_param[idx].file);
                qemu_mutex_lock(&comp_param[idx].mutex);
                set_compress_params(&comp_param[idx], block, offset);
                qemu_cond_signal(&comp_param[idx].cond);
                qemu_mutex_unlock(&comp_param[idx].mutex);
                pages = 1;
                acct_info.norm_pages++;
                *bytes_transferred += bytes_xmit;
                break;
            }
        }
        if (pages > 0) {
            break;
        } else {
            qemu_cond_wait(&comp_done_cond, &comp_done_lock);
        }
    }
    qemu_mutex_unlock(&comp_done_lock);

    return pages;
}
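
/* The sender blocks here when every compression worker is busy: it sleeps
 * on comp_done_cond and retries until some worker flips its ->done flag.
 * Draining the worker's QEMUFile with qemu_put_qemu_file() before reusing
 * it is what actually moves the compressed bytes into the stream.
 */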
/**
 * ram_save_compressed_page: compress the given page and send it to the stream
 *
 * Returns: Number of pages written.
 *
 * @ms: The current migration state.
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 */
static int ram_save_compressed_page(MigrationState *ms, QEMUFile *f,
                                    PageSearchStatus *pss, bool last_stage,
                                    uint64_t *bytes_transferred)
{
    int pages = -1;
    uint64_t bytes_xmit = 0;
    uint8_t *p;
    int ret, blen;
    RAMBlock *block = pss->block;
    ram_addr_t offset = pss->offset;

    p = block->host + offset;

    ret = ram_control_save_page(f, block->offset,
                                offset, TARGET_PAGE_SIZE, &bytes_xmit);
    if (bytes_xmit) {
        *bytes_transferred += bytes_xmit;
        pages = 1;
    }
    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
        if (ret != RAM_SAVE_CONTROL_DELAYED) {
            if (bytes_xmit > 0) {
                acct_info.norm_pages++;
            } else if (bytes_xmit == 0) {
                acct_info.dup_pages++;
            }
        }
    } else {
        /* When starting the process of a new block, the first page of
         * the block should be sent out before other pages in the same
         * block, and all the pages in last block should have been sent
         * out, keeping this order is important, because the 'cont' flag
         * is used to avoid resending the block name.
         */
        if (block != last_sent_block) {
            flush_compressed_data(f);
            pages = save_zero_page(f, block, offset, p, bytes_transferred);
            if (pages == -1) {
                /* Make sure the first page is sent out before other pages */
                bytes_xmit = save_page_header(f, block, offset |
                                              RAM_SAVE_FLAG_COMPRESS_PAGE);
                blen = qemu_put_compression_data(f, p, TARGET_PAGE_SIZE,
                                                 migrate_compress_level());
                if (blen > 0) {
                    *bytes_transferred += bytes_xmit + blen;
                    acct_info.norm_pages++;
                    pages = 1;
                } else {
                    qemu_file_set_error(f, blen);
                    error_report("compressed data failed!");
                }
            }
            if (pages > 0) {
                ram_release_pages(ms, block->idstr, pss->offset, pages);
            }
        } else {
            offset |= RAM_SAVE_FLAG_CONTINUE;
            pages = save_zero_page(f, block, offset, p, bytes_transferred);
            if (pages == -1) {
                pages = compress_page_with_multi_thread(f, block, offset,
                                                        bytes_transferred);
            } else {
                ram_release_pages(ms, block->idstr, pss->offset, pages);
            }
        }
    }

    return pages;
}
/*
 * Find the next dirty page and update any state associated with
 * the search process.
 *
 * Returns: True if a page is found
 *
 * @f: Current migration stream.
 * @pss: Data about the state of the current dirty page scan.
 * @*again: Set to false if the search has scanned the whole of RAM
 * *ram_addr_abs: Pointer into which to store the address of the dirty page
 *                within the global ram_addr space
 */
static bool find_dirty_block(QEMUFile *f, PageSearchStatus *pss,
                             bool *again, ram_addr_t *ram_addr_abs)
{
    pss->offset = migration_bitmap_find_dirty(pss->block, pss->offset,
                                              ram_addr_abs);
    if (pss->complete_round && pss->block == last_seen_block &&
        pss->offset >= last_offset) {
        /*
         * We've been once around the RAM and haven't found anything.
         * Give up.
         */
        *again = false;
        return false;
    }
    if (pss->offset >= pss->block->used_length) {
        /* Didn't find anything in this RAM Block */
        pss->offset = 0;
        pss->block = QLIST_NEXT_RCU(pss->block, next);
        if (!pss->block) {
            /* Hit the end of the list */
            pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
            /* Flag that we've looped */
            pss->complete_round = true;
            ram_bulk_stage = false;
            if (migrate_use_xbzrle()) {
                /* If xbzrle is on, stop using the data compression at this
                 * point. In theory, xbzrle can do better than compression.
                 */
                flush_compressed_data(f);
                compression_switch = false;
            }
        }
        /* Didn't find anything this time, but try again on the new block */
        *again = true;
        return false;
    } else {
        /* Can go around again, but... */
        *again = true;
        /* We've found something so probably don't need to */
        return true;
    }
}
/*
 * Helper for 'get_queued_page' - gets a page off the queue
 *      ms:      MigrationState in
 * *offset:      Used to return the offset within the RAMBlock
 * ram_addr_abs: global offset in the dirty/sent bitmaps
 *
 * Returns:      block (or NULL if none available)
 */
static RAMBlock *unqueue_page(MigrationState *ms, ram_addr_t *offset,
                              ram_addr_t *ram_addr_abs)
{
    RAMBlock *block = NULL;

    qemu_mutex_lock(&ms->src_page_req_mutex);
    if (!QSIMPLEQ_EMPTY(&ms->src_page_requests)) {
        struct MigrationSrcPageRequest *entry =
                                QSIMPLEQ_FIRST(&ms->src_page_requests);
        block = entry->rb;
        *offset = entry->offset;
        *ram_addr_abs = (entry->offset + entry->rb->offset) &
                        TARGET_PAGE_MASK;

        if (entry->len > TARGET_PAGE_SIZE) {
            entry->len -= TARGET_PAGE_SIZE;
            entry->offset += TARGET_PAGE_SIZE;
        } else {
            memory_region_unref(block->mr);
            QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req);
            g_free(entry);
        }
    }
    qemu_mutex_unlock(&ms->src_page_req_mutex);

    return block;
}
/*
 * Unqueue a page from the queue fed by postcopy page requests; skips pages
 * that are already sent (!dirty)
 *
 *      ms:      MigrationState in
 *     pss:      PageSearchStatus structure updated with found block/offset
 * ram_addr_abs: global offset in the dirty/sent bitmaps
 *
 * Returns:      true if a queued page is found
 */
static bool get_queued_page(MigrationState *ms, PageSearchStatus *pss,
                            ram_addr_t *ram_addr_abs)
{
    RAMBlock  *block;
    ram_addr_t offset;
    bool dirty;

    do {
        block = unqueue_page(ms, &offset, ram_addr_abs);
        /*
         * We're sending this page, and since it's postcopy nothing else
         * will dirty it, and we must make sure it doesn't get sent again
         * even if this queue request was received after the background
         * search already sent it.
         */
        if (block) {
            unsigned long *bitmap;
            bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
            dirty = test_bit(*ram_addr_abs >> TARGET_PAGE_BITS, bitmap);
            if (!dirty) {
                trace_get_queued_page_not_dirty(
                    block->idstr, (uint64_t)offset,
                    (uint64_t)*ram_addr_abs,
                    test_bit(*ram_addr_abs >> TARGET_PAGE_BITS,
                         atomic_rcu_read(&migration_bitmap_rcu)->unsentmap));
            } else {
                trace_get_queued_page(block->idstr,
                                      (uint64_t)offset,
                                      (uint64_t)*ram_addr_abs);
            }
        }

    } while (block && !dirty);

    if (block) {
        /*
         * As soon as we start servicing pages out of order, then we have
         * to kill the bulk stage, since the bulk stage assumes
         * in (migration_bitmap_find_and_reset_dirty) that every page is
         * dirty, that's no longer true.
         */
        ram_bulk_stage = false;

        /*
         * We want the background search to continue from the queued page
         * since the guest is likely to want other pages near to the page
         * it just requested.
         */
        pss->block = block;
        pss->offset = offset;
    }

    return !!block;
}
/**
 * flush_page_queue: Flush any remaining pages in the ram request queue
 *    it should be empty at the end anyway, but in error cases there may be
 *    some left.
 *
 * ms: MigrationState
 */
void flush_page_queue(MigrationState *ms)
{
    struct MigrationSrcPageRequest *mspr, *next_mspr;
    /* This queue generally should be empty - but in the case of a failed
     * migration might have some droppings in.
     */
    rcu_read_lock();
    QSIMPLEQ_FOREACH_SAFE(mspr, &ms->src_page_requests, next_req, next_mspr) {
        memory_region_unref(mspr->rb->mr);
        QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req);
        g_free(mspr);
    }
    rcu_read_unlock();
}
/**
 * Queue the pages for transmission, e.g. a request from postcopy destination
 *   ms: MigrationStatus in which the queue is held
 *   rbname: The RAMBlock the request is for - may be NULL (to mean reuse last)
 *   start: Offset from the start of the RAMBlock
 *   len: Length (in bytes) to send
 *   Return: 0 on success
 */
int ram_save_queue_pages(MigrationState *ms, const char *rbname,
                         ram_addr_t start, ram_addr_t len)
{
    RAMBlock *ramblock;

    ms->postcopy_requests++;
    rcu_read_lock();
    if (!rbname) {
        /* Reuse last RAMBlock */
        ramblock = ms->last_req_rb;

        if (!ramblock) {
            /*
             * Shouldn't happen, we can't reuse the last RAMBlock if
             * it's the 1st request.
             */
            error_report("ram_save_queue_pages no previous block");
            goto err;
        }
    } else {
        ramblock = qemu_ram_block_by_name(rbname);

        if (!ramblock) {
            /* We shouldn't be asked for a non-existent RAMBlock */
            error_report("ram_save_queue_pages no block '%s'", rbname);
            goto err;
        }
        ms->last_req_rb = ramblock;
    }
    trace_ram_save_queue_pages(ramblock->idstr, start, len);
    if (start + len > ramblock->used_length) {
        error_report("%s request overrun start=" RAM_ADDR_FMT " len="
                     RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
                     __func__, start, len, ramblock->used_length);
        goto err;
    }

    struct MigrationSrcPageRequest *new_entry =
        g_malloc0(sizeof(struct MigrationSrcPageRequest));
    new_entry->rb = ramblock;
    new_entry->offset = start;
    new_entry->len = len;

    memory_region_ref(ramblock->mr);
    qemu_mutex_lock(&ms->src_page_req_mutex);
    QSIMPLEQ_INSERT_TAIL(&ms->src_page_requests, new_entry, next_req);
    qemu_mutex_unlock(&ms->src_page_req_mutex);
    rcu_read_unlock();

    return 0;

err:
    rcu_read_unlock();
    return -1;
}
/**
 * ram_save_target_page: Save one target page
 *
 * @ms: The current migration state.
 * @f: QEMUFile where to send the data
 * @block: pointer to block that contains the page we want to send
 * @offset: offset inside the block for the page;
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space
 *
 * Returns: Number of pages written.
 */
static int ram_save_target_page(MigrationState *ms, QEMUFile *f,
                                PageSearchStatus *pss,
                                bool last_stage,
                                uint64_t *bytes_transferred,
                                ram_addr_t dirty_ram_abs)
{
    int res = 0;

    /* Check the pages is dirty and if it is send it */
    if (migration_bitmap_clear_dirty(dirty_ram_abs)) {
        unsigned long *unsentmap;
        if (compression_switch && migrate_use_compression()) {
            res = ram_save_compressed_page(ms, f, pss,
                                           last_stage,
                                           bytes_transferred);
        } else {
            res = ram_save_page(ms, f, pss, last_stage,
                                bytes_transferred);
        }

        if (res < 0) {
            return res;
        }
        unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
        if (unsentmap) {
            clear_bit(dirty_ram_abs >> TARGET_PAGE_BITS, unsentmap);
        }
        /* Only update last_sent_block if a block was actually sent; xbzrle
         * might have decided the page was identical so didn't bother writing
         * to the stream.
         */
        if (res > 0) {
            last_sent_block = pss->block;
        }
    }

    return res;
}
/**
 * ram_save_host_page: Starting at *offset send pages up to the end
 *                     of the current host page.  It's valid for the initial
 *                     offset to point into the middle of a host page
 *                     in which case the remainder of the hostpage is sent.
 *                     Only dirty target pages are sent.
 *
 * Returns: Number of pages written.
 *
 * @ms: The current migration state.
 * @f: QEMUFile where to send the data
 * @block: pointer to block that contains the page we want to send
 * @offset: offset inside the block for the page; updated to last target page
 *          sent
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space
 */
static int ram_save_host_page(MigrationState *ms, QEMUFile *f,
                              PageSearchStatus *pss,
                              bool last_stage,
                              uint64_t *bytes_transferred,
                              ram_addr_t dirty_ram_abs)
{
    int tmppages, pages = 0;
    do {
        tmppages = ram_save_target_page(ms, f, pss, last_stage,
                                        bytes_transferred, dirty_ram_abs);
        if (tmppages < 0) {
            return tmppages;
        }

        pages += tmppages;
        pss->offset += TARGET_PAGE_SIZE;
        dirty_ram_abs += TARGET_PAGE_SIZE;
    } while (pss->offset & (qemu_host_page_size - 1));

    /* The offset we leave with is the last one we looked at */
    pss->offset -= TARGET_PAGE_SIZE;
    return pages;
}
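
/* This matters on hosts whose page size exceeds the target's: e.g. with
 * 64 KiB host pages and 4 KiB target pages the loop above covers up to 16
 * target pages, so the destination can always place a complete host page
 * in one atomic operation during postcopy.
 */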
/**
 * ram_find_and_save_block: Finds a dirty page and sends it to f
 *
 * Called within an RCU critical section.
 *
 * Returns:  The number of pages written
 *           0 means no dirty pages
 *
 * @f: QEMUFile where to send the data
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 *
 * On systems where host-page-size > target-page-size it will send all the
 * pages in a host page that are dirty.
 */
static int ram_find_and_save_block(QEMUFile *f, bool last_stage,
                                   uint64_t *bytes_transferred)
{
    PageSearchStatus pss;
    MigrationState *ms = migrate_get_current();
    int pages = 0;
    bool again, found;
    ram_addr_t dirty_ram_abs; /* Address of the start of the dirty page in
                                 ram_addr_t space */

    /* No dirty page as there is zero RAM */
    if (!ram_bytes_total()) {
        return pages;
    }

    pss.block = last_seen_block;
    pss.offset = last_offset;
    pss.complete_round = false;

    if (!pss.block) {
        pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
    }

    do {
        again = true;
        found = get_queued_page(ms, &pss, &dirty_ram_abs);

        if (!found) {
            /* priority queue empty, so just search for something dirty */
            found = find_dirty_block(f, &pss, &again, &dirty_ram_abs);
        }

        if (found) {
            pages = ram_save_host_page(ms, f, &pss,
                                       last_stage, bytes_transferred,
                                       dirty_ram_abs);
        }
    } while (!pages && again);

    last_seen_block = pss.block;
    last_offset = pss.offset;

    return pages;
}
void acct_update_position(QEMUFile *f, size_t size, bool zero)
{
    uint64_t pages = size / TARGET_PAGE_SIZE;
    if (zero) {
        acct_info.dup_pages += pages;
    } else {
        acct_info.norm_pages += pages;
        bytes_transferred += size;
        qemu_update_position(f, size);
    }
}
static ram_addr_t ram_save_remaining(void)
{
    return migration_dirty_pages;
}

uint64_t ram_bytes_remaining(void)
{
    return ram_save_remaining() * TARGET_PAGE_SIZE;
}

uint64_t ram_bytes_transferred(void)
{
    return bytes_transferred;
}
uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next)
        total += block->used_length;
    rcu_read_unlock();
    return total;
}
void free_xbzrle_decoded_buf(void)
{
    g_free(xbzrle_decoded_buf);
    xbzrle_decoded_buf = NULL;
}
static void migration_bitmap_free(struct BitmapRcu *bmap)
{
    g_free(bmap->bmap);
    g_free(bmap->unsentmap);
    g_free(bmap);
}
static void ram_migration_cleanup(void *opaque)
{
    /* caller has held the iothread lock or is in a bh, so there is
     * no writing race against this migration_bitmap
     */
    struct BitmapRcu *bitmap = migration_bitmap_rcu;
    atomic_rcu_set(&migration_bitmap_rcu, NULL);
    if (bitmap) {
        memory_global_dirty_log_stop();
        call_rcu(bitmap, migration_bitmap_free, rcu);
    }

    XBZRLE_cache_lock();
    if (XBZRLE.cache) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        g_free(ZERO_TARGET_PAGE);
        XBZRLE.cache = NULL;
        XBZRLE.encoded_buf = NULL;
        XBZRLE.current_buf = NULL;
    }
    XBZRLE_cache_unlock();
}
static void reset_ram_globals(void)
{
    last_seen_block = NULL;
    last_sent_block = NULL;
    last_offset = 0;
    last_version = ram_list.version;
    ram_bulk_stage = true;
}

#define MAX_WAIT 50 /* ms, half buffered_file limit */
void migration_bitmap_extend(ram_addr_t old, ram_addr_t new)
{
    /* called in qemu main thread, so there is
     * no writing race against this migration_bitmap
     */
    if (migration_bitmap_rcu) {
        struct BitmapRcu *old_bitmap = migration_bitmap_rcu, *bitmap;
        bitmap = g_new(struct BitmapRcu, 1);
        bitmap->bmap = bitmap_new(new);

        /* prevent migration_bitmap content from being set bit
         * by migration_bitmap_sync_range() at the same time.
         * it is safe for migration when a migration_bitmap bit is
         * cleared at the same time.
         */
        qemu_mutex_lock(&migration_bitmap_mutex);
        bitmap_copy(bitmap->bmap, old_bitmap->bmap, old);
        bitmap_set(bitmap->bmap, old, new - old);

        /* We don't have a way to safely extend the sentmap
         * with RCU; so mark it as missing, entry to postcopy
         * will fail.
         */
        bitmap->unsentmap = NULL;

        atomic_rcu_set(&migration_bitmap_rcu, bitmap);
        qemu_mutex_unlock(&migration_bitmap_mutex);
        migration_dirty_pages += new - old;
        call_rcu(old_bitmap, migration_bitmap_free, rcu);
    }
}
/*
 * 'expected' is the value you expect the bitmap mostly to be full
 * of; it won't bother printing lines that are all this value.
 * If 'todump' is null the migration bitmap is dumped.
 */
void ram_debug_dump_bitmap(unsigned long *todump, bool expected)
{
    int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;

    int64_t cur;
    int64_t linelen = 128;
    char linebuf[129];

    if (!todump) {
        todump = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
    }

    for (cur = 0; cur < ram_pages; cur += linelen) {
        int64_t curb;
        bool found = false;
        /*
         * Last line; catch the case where the line length
         * is longer than remaining ram
         */
        if (cur + linelen > ram_pages) {
            linelen = ram_pages - cur;
        }
        for (curb = 0; curb < linelen; curb++) {
            bool thisbit = test_bit(cur + curb, todump);
            linebuf[curb] = thisbit ? '1' : '.';
            found = found || (thisbit != expected);
        }
        if (found) {
            linebuf[curb] = '\0';
            fprintf(stderr, "0x%08" PRIx64 " : %s\n", cur, linebuf);
        }
    }
}
/* **** functions for postcopy ***** */

void ram_postcopy_migrated_memory_release(MigrationState *ms)
{
    struct RAMBlock *block;
    unsigned long *bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        unsigned long first = block->offset >> TARGET_PAGE_BITS;
        unsigned long range = first + (block->used_length >> TARGET_PAGE_BITS);
        unsigned long run_start = find_next_zero_bit(bitmap, range, first);

        while (run_start < range) {
            unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
            ram_discard_range(NULL, block->idstr, run_start << TARGET_PAGE_BITS,
                              (run_end - run_start) << TARGET_PAGE_BITS);
            run_start = find_next_zero_bit(bitmap, range, run_end + 1);
        }
    }
}
/*
 * Callback from postcopy_each_ram_send_discard for each RAMBlock
 * Note: At this point the 'unsentmap' is the processed bitmap combined
 *       with the dirtymap; so a '1' means it's either dirty or unsent.
 * start,length: Indexes into the bitmap for the first bit
 *            representing the named block and length in target-pages
 */
static int postcopy_send_discard_bm_ram(MigrationState *ms,
                                        PostcopyDiscardState *pds,
                                        unsigned long start,
                                        unsigned long length)
{
    unsigned long end = start + length; /* one after the end */
    unsigned long current;
    unsigned long *unsentmap;

    unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
    for (current = start; current < end; ) {
        unsigned long one = find_next_bit(unsentmap, end, current);

        if (one <= end) {
            unsigned long zero = find_next_zero_bit(unsentmap, end, one + 1);
            unsigned long discard_length;

            if (zero >= end) {
                discard_length = end - one;
            } else {
                discard_length = zero - one;
            }
            if (discard_length) {
                postcopy_discard_send_range(ms, pds, one, discard_length);
            }
            current = one + discard_length;
        } else {
            current = one;
        }
    }

    return 0;
}
/*
 * Utility for the outgoing postcopy code.
 *   Calls postcopy_send_discard_bm_ram for each RAMBlock
 *   passing it bitmap indexes and name.
 * Returns: 0 on success
 * (qemu_ram_foreach_block ends up passing unscaled lengths
 *  which would mean postcopy code would have to deal with target page)
 */
static int postcopy_each_ram_send_discard(MigrationState *ms)
{
    struct RAMBlock *block;
    int ret;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        unsigned long first = block->offset >> TARGET_PAGE_BITS;
        PostcopyDiscardState *pds = postcopy_discard_send_init(ms,
                                                               first,
                                                               block->idstr);

        /*
         * Postcopy sends chunks of bitmap over the wire, but it
         * just needs indexes at this point, avoids it having
         * target page specific code.
         */
        ret = postcopy_send_discard_bm_ram(ms, pds, first,
                                    block->used_length >> TARGET_PAGE_BITS);
        postcopy_discard_send_finish(ms, pds);
        if (ret) {
            return ret;
        }
    }

    return 0;
}
/*
 * Helper for postcopy_chunk_hostpages; it's called twice to cleanup
 *   the two bitmaps, that are similar, but one is inverted.
 *
 * We search for runs of target-pages that don't start or end on a
 * host page boundary;
 *   unsent_pass=true: Cleans up partially unsent host pages by searching
 *                     the unsentmap
 *   unsent_pass=false: Cleans up partially dirty host pages by searching
 *                      the main migration bitmap
 *
 */
static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
                                          RAMBlock *block,
                                          PostcopyDiscardState *pds)
{
    unsigned long *bitmap;
    unsigned long *unsentmap;
    unsigned int host_ratio = qemu_host_page_size / TARGET_PAGE_SIZE;
    unsigned long first = block->offset >> TARGET_PAGE_BITS;
    unsigned long len = block->used_length >> TARGET_PAGE_BITS;
    unsigned long last = first + (len - 1);
    unsigned long run_start;

    bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
    unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;

    if (unsent_pass) {
        /* Find a sent page */
        run_start = find_next_zero_bit(unsentmap, last + 1, first);
    } else {
        /* Find a dirty page */
        run_start = find_next_bit(bitmap, last + 1, first);
    }

    while (run_start <= last) {
        bool do_fixup = false;
        unsigned long fixup_start_addr;
        unsigned long host_offset;

        /*
         * If the start of this run of pages is in the middle of a host
         * page, then we need to fixup this host page.
         */
        host_offset = run_start % host_ratio;
        if (host_offset) {
            do_fixup = true;
            run_start -= host_offset;
            fixup_start_addr = run_start;
            /* For the next pass */
            run_start = run_start + host_ratio;
        } else {
            /* Find the end of this run */
            unsigned long run_end;
            if (unsent_pass) {
                run_end = find_next_bit(unsentmap, last + 1, run_start + 1);
            } else {
                run_end = find_next_zero_bit(bitmap, last + 1, run_start + 1);
            }
            /*
             * If the end isn't at the start of a host page, then the
             * run doesn't finish at the end of a host page
             * and we need to discard.
             */
            host_offset = run_end % host_ratio;
            if (host_offset) {
                do_fixup = true;
                fixup_start_addr = run_end - host_offset;
                /*
                 * This host page has gone, the next loop iteration starts
                 * from after the fixup
                 */
                run_start = fixup_start_addr + host_ratio;
            } else {
                /*
                 * No discards on this iteration, next loop starts from
                 * next sent/dirty page
                 */
                run_start = run_end + 1;
            }
        }

        if (do_fixup) {
            unsigned long page;

            /* Tell the destination to discard this page */
            if (unsent_pass || !test_bit(fixup_start_addr, unsentmap)) {
                /* For the unsent_pass we:
                 *     discard partially sent pages
                 * For the !unsent_pass (dirty) we:
                 *     discard partially dirty pages that were sent
                 *     (any partially sent pages were already discarded
                 *     by the previous unsent_pass)
                 */
                postcopy_discard_send_range(ms, pds, fixup_start_addr,
                                            host_ratio);
            }

            /* Clean up the bitmap */
            for (page = fixup_start_addr;
                 page < fixup_start_addr + host_ratio; page++) {
                /* All pages in this host page are now not sent */
                set_bit(page, unsentmap);

                /*
                 * Remark them as dirty, updating the count for any pages
                 * that weren't previously dirty.
                 */
                migration_dirty_pages += !test_and_set_bit(page, bitmap);
            }
        }

        if (unsent_pass) {
            /* Find the next sent page for the next iteration */
            run_start = find_next_zero_bit(unsentmap, last + 1,
                                           run_start);
        } else {
            /* Find the next dirty page for the next iteration */
            run_start = find_next_bit(bitmap, last + 1, run_start);
        }
    }
}
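
/* Example with host_ratio == 16 (64 KiB host pages, 4 KiB target pages):
 * if a run boundary falls at target page 19, pages 16..31 form a partially
 * covered host page, so all 16 of them are re-marked dirty and unsent and
 * the destination is told to discard that whole host page.
 */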
/*
 * Utility for the outgoing postcopy code.
 *
 * Discard any partially sent host-page size chunks, mark any partially
 * dirty host-page size chunks as all dirty.
 *
 * Returns: 0 on success
 */
static int postcopy_chunk_hostpages(MigrationState *ms)
{
    struct RAMBlock *block;

    if (qemu_host_page_size == TARGET_PAGE_SIZE) {
        /* Easy case - TPS==HPS - nothing to be done */
        return 0;
    }

    /* Easiest way to make sure we don't resume in the middle of a host-page */
    last_seen_block = NULL;
    last_sent_block = NULL;
    last_offset = 0;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        unsigned long first = block->offset >> TARGET_PAGE_BITS;

        PostcopyDiscardState *pds =
                         postcopy_discard_send_init(ms, first, block->idstr);

        /* First pass: Discard all partially sent host pages */
        postcopy_chunk_hostpages_pass(ms, true, block, pds);
        /*
         * Second pass: Ensure that all partially dirty host pages are made
         * fully dirty.
         */
        postcopy_chunk_hostpages_pass(ms, false, block, pds);

        postcopy_discard_send_finish(ms, pds);
    } /* ram_list loop */

    return 0;
}
/*
 * Transmit the set of pages to be discarded after precopy to the target
 * these are pages that:
 *     a) Have been previously transmitted but are now dirty again
 *     b) Pages that have never been transmitted, this ensures that
 *        any pages on the destination that have been mapped by background
 *        tasks get discarded (transparent huge pages is the specific concern)
 * Hopefully this is pretty sparse
 */
int ram_postcopy_send_discard_bitmap(MigrationState *ms)
{
    int ret;
    unsigned long *bitmap, *unsentmap;

    rcu_read_lock();

    /* This should be our last sync, the src is now paused */
    migration_bitmap_sync();

    unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
    if (!unsentmap) {
        /* We don't have a safe way to resize the sentmap, so
         * if the bitmap was resized it will be NULL at this
         * point.
         */
        error_report("migration ram resized during precopy phase");
        rcu_read_unlock();
        return -EINVAL;
    }

    /* Deal with TPS != HPS */
    ret = postcopy_chunk_hostpages(ms);
    if (ret) {
        rcu_read_unlock();
        return ret;
    }

    /*
     * Update the unsentmap to be unsentmap = unsentmap | dirty
     */
    bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
    bitmap_or(unsentmap, unsentmap, bitmap,
              last_ram_offset() >> TARGET_PAGE_BITS);

    trace_ram_postcopy_send_discard_bitmap();
#ifdef DEBUG_POSTCOPY
    ram_debug_dump_bitmap(unsentmap, true);
#endif

    ret = postcopy_each_ram_send_discard(ms);
    rcu_read_unlock();

    return ret;
}
/*
 * At the start of the postcopy phase of migration, any now-dirty
 * precopied pages are discarded.
 *
 * start, length describe a byte address range within the RAMBlock
 *
 * Returns 0 on success.
 */
int ram_discard_range(MigrationIncomingState *mis,
                      const char *block_name,
                      uint64_t start, size_t length)
{
    int ret = -1;

    rcu_read_lock();
    RAMBlock *rb = qemu_ram_block_by_name(block_name);

    if (!rb) {
        error_report("ram_discard_range: Failed to find block '%s'",
                     block_name);
        goto err;
    }

    uint8_t *host_startaddr = rb->host + start;

    if ((uintptr_t)host_startaddr & (qemu_host_page_size - 1)) {
        error_report("ram_discard_range: Unaligned start address: %p",
                     host_startaddr);
        goto err;
    }

    if ((start + length) <= rb->used_length) {
        uint8_t *host_endaddr = host_startaddr + length;
        if ((uintptr_t)host_endaddr & (qemu_host_page_size - 1)) {
            error_report("ram_discard_range: Unaligned end address: %p",
                         host_endaddr);
            goto err;
        }
        ret = postcopy_ram_discard_range(mis, host_startaddr, length);
    } else {
        error_report("ram_discard_range: Overrun block '%s' (%" PRIu64
                     "/%zx/" RAM_ADDR_FMT ")",
                     block_name, start, length, rb->used_length);
    }

err:
    rcu_read_unlock();
    return ret;
}
static int ram_save_init_globals(void)
{
    int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */

    dirty_rate_high_cnt = 0;
    bitmap_sync_count = 0;
    migration_bitmap_sync_init();
    qemu_mutex_init(&migration_bitmap_mutex);

    if (migrate_use_xbzrle()) {
        XBZRLE_cache_lock();
        ZERO_TARGET_PAGE = g_malloc0(TARGET_PAGE_SIZE);
        XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
                                  TARGET_PAGE_SIZE,
                                  TARGET_PAGE_SIZE);
        if (!XBZRLE.cache) {
            XBZRLE_cache_unlock();
            error_report("Error creating cache");
            return -1;
        }
        XBZRLE_cache_unlock();

        /* We prefer not to abort if there is no memory */
        XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
        if (!XBZRLE.encoded_buf) {
            error_report("Error allocating encoded_buf");
            return -1;
        }

        XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
        if (!XBZRLE.current_buf) {
            error_report("Error allocating current_buf");
            g_free(XBZRLE.encoded_buf);
            XBZRLE.encoded_buf = NULL;
            return -1;
        }

        acct_clear();
    }

    /* For memory_global_dirty_log_start below.  */
    qemu_mutex_lock_iothread();

    qemu_mutex_lock_ramlist();
    rcu_read_lock();
    bytes_transferred = 0;
    reset_ram_globals();

    migration_bitmap_rcu = g_new0(struct BitmapRcu, 1);
    /* Skip setting bitmap if there is no RAM */
    if (ram_bytes_total()) {
        ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
        migration_bitmap_rcu->bmap = bitmap_new(ram_bitmap_pages);
        bitmap_set(migration_bitmap_rcu->bmap, 0, ram_bitmap_pages);

        if (migrate_postcopy_ram()) {
            migration_bitmap_rcu->unsentmap = bitmap_new(ram_bitmap_pages);
            bitmap_set(migration_bitmap_rcu->unsentmap, 0, ram_bitmap_pages);
        }
    }

    /*
     * Count the total number of pages used by ram blocks not including any
     * gaps due to alignment or unplugs.
     */
    migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;

    memory_global_dirty_log_start();
    migration_bitmap_sync();
    qemu_mutex_unlock_ramlist();
    qemu_mutex_unlock_iothread();
    rcu_read_unlock();

    return 0;
}
/* Each of ram_save_setup, ram_save_iterate and ram_save_complete has
 * long-running RCU critical section.  When rcu-reclaims in the code
 * start to become numerous it will be necessary to reduce the
 * granularity of these critical sections.
 */

static int ram_save_setup(QEMUFile *f, void *opaque)
{
    RAMBlock *block;

    /* migration has already setup the bitmap, reuse it. */
    if (!migration_in_colo_state()) {
        if (ram_save_init_globals() < 0) {
            return -1;
        }
    }

    rcu_read_lock();

    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
        qemu_put_be64(f, block->used_length);
    }

    rcu_read_unlock();

    ram_control_before_iterate(f, RAM_CONTROL_SETUP);
    ram_control_after_iterate(f, RAM_CONTROL_SETUP);

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}
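
/* Layout of the setup section on the wire:
 *
 *   be64: total ram size, with RAM_SAVE_FLAG_MEM_SIZE OR'ed in
 *   per RAMBlock: u8 idstr length, idstr bytes, be64 used_length
 *   be64: RAM_SAVE_FLAG_EOS
 */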
static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int i;
    int64_t t0;
    int done = 0;

    rcu_read_lock();
    if (ram_list.version != last_version) {
        reset_ram_globals();
    }

    /* Read version before ram_list.blocks */
    smp_rmb();

    ram_control_before_iterate(f, RAM_CONTROL_ROUND);

    t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    i = 0;
    while ((ret = qemu_file_rate_limit(f)) == 0) {
        int pages;

        pages = ram_find_and_save_block(f, false, &bytes_transferred);
        /* no more pages to send */
        if (pages == 0) {
            done = 1;
            break;
        }
        acct_info.iterations++;

        /* we want to check in the 1st loop, just in case it was the 1st time
           and we had to sync the dirty bitmap.
           qemu_get_clock_ns() is a bit expensive, so we only check each some
           iterations
        */
        if ((i & 63) == 0) {
            uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
            if (t1 > MAX_WAIT) {
                trace_ram_save_iterate_big_wait(t1, i);
                break;
            }
        }
        i++;
    }
    flush_compressed_data(f);
    rcu_read_unlock();

    /*
     * Must occur before EOS (or any QEMUFile operation)
     * because of RDMA protocol.
     */
    ram_control_after_iterate(f, RAM_CONTROL_ROUND);

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    bytes_transferred += 8;

    ret = qemu_file_get_error(f);
    if (ret < 0) {
        return ret;
    }

    return done;
}
/* Called with iothread lock */
static int ram_save_complete(QEMUFile *f, void *opaque)
{
    rcu_read_lock();

    if (!migration_in_postcopy(migrate_get_current())) {
        migration_bitmap_sync();
    }

    ram_control_before_iterate(f, RAM_CONTROL_FINISH);

    /* try transferring iterative blocks of memory */

    /* flush all remaining blocks regardless of rate limiting */
    while (true) {
        int pages;

        pages = ram_find_and_save_block(f, !migration_in_colo_state(),
                                        &bytes_transferred);
        /* no more blocks to send */
        if (pages == 0) {
            break;
        }
    }

    flush_compressed_data(f);
    ram_control_after_iterate(f, RAM_CONTROL_FINISH);

    rcu_read_unlock();

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}
static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
                             uint64_t *non_postcopiable_pending,
                             uint64_t *postcopiable_pending)
{
    uint64_t remaining_size;

    remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;

    if (!migration_in_postcopy(migrate_get_current()) &&
        remaining_size < max_size) {
        qemu_mutex_lock_iothread();
        rcu_read_lock();
        migration_bitmap_sync();
        rcu_read_unlock();
        qemu_mutex_unlock_iothread();
        remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
    }

    /* We can do postcopy, and all the data is postcopiable */
    *postcopiable_pending += remaining_size;
}
static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    unsigned int xh_len;
    int xh_flags;
    uint8_t *loaded_data;

    if (!xbzrle_decoded_buf) {
        xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
    }
    loaded_data = xbzrle_decoded_buf;

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        error_report("Failed to load XBZRLE page - wrong compression!");
        return -1;
    }

    if (xh_len > TARGET_PAGE_SIZE) {
        error_report("Failed to load XBZRLE page - len overflow!");
        return -1;
    }
    /* load data and decode */
    qemu_get_buffer_in_place(f, &loaded_data, xh_len);

    /* decode RLE */
    if (xbzrle_decode_buffer(loaded_data, xh_len, host,
                             TARGET_PAGE_SIZE) == -1) {
        error_report("Failed to load XBZRLE page - decode error!");
        return -1;
    }

    return 0;
}
/* Must be called from within a rcu critical section.
 * Returns a pointer from within the RCU-protected ram_list.
 */
/*
 * Read a RAMBlock ID from the stream f.
 *
 * f: Stream to read from
 * flags: Page flags (mostly to see if it's a continuation of previous block)
 */
static inline RAMBlock *ram_block_from_stream(QEMUFile *f,
                                              int flags)
{
    static RAMBlock *block = NULL;
    char id[256];
    uint8_t len;

    if (flags & RAM_SAVE_FLAG_CONTINUE) {
        if (!block) {
            error_report("Ack, bad migration stream!");
            return NULL;
        }
        return block;
    }

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);
    id[len] = 0;

    block = qemu_ram_block_by_name(id);
    if (!block) {
        error_report("Can't find block %s", id);
        return NULL;
    }

    return block;
}
static inline void *host_from_ram_block_offset(RAMBlock *block,
                                               ram_addr_t offset)
{
    if (!offset_in_ramblock(block, offset)) {
        return NULL;
    }

    return block->host + offset;
}
/*
 * If a page (or a whole RDMA chunk) has been
 * determined to be zero, then zap it.
 */
void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
{
    if (ch != 0 || !is_zero_range(host, size)) {
        memset(host, ch, size);
    }
}
static void *do_data_decompress(void *opaque)
{
    DecompressParam *param = opaque;
    unsigned long pagesize;
    uint8_t *des;
    int len;

    qemu_mutex_lock(&param->mutex);
    while (!param->quit) {
        if (param->des) {
            des = param->des;
            len = param->len;
            param->des = 0;
            qemu_mutex_unlock(&param->mutex);

            pagesize = TARGET_PAGE_SIZE;
            /* uncompress() can fail here, typically when the page was
             * dirtied while it was being compressed. That is harmless:
             * the dirty page will be retransferred, and a failed
             * uncompress() does not corrupt the data in other pages.
             */
            uncompress((Bytef *)des, &pagesize,
                       (const Bytef *)param->compbuf, len);

            qemu_mutex_lock(&decomp_done_lock);
            param->done = true;
            qemu_cond_signal(&decomp_done_cond);
            qemu_mutex_unlock(&decomp_done_lock);

            qemu_mutex_lock(&param->mutex);
        } else {
            qemu_cond_wait(&param->cond, &param->mutex);
        }
    }
    qemu_mutex_unlock(&param->mutex);

    return NULL;
}
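
/*
 * The handshake between the feeder (decompress_data_with_multi_threads
 * below) and each worker thread, sketched:
 *
 *     feeder                          worker (do_data_decompress)
 *     ------                          ---------------------------
 *     see done == true                wait on param->cond
 *     done = false, lock mutex
 *     fill compbuf, des, len
 *     signal param->cond, unlock      wake, grab des/len, unlock
 *                                     uncompress() outside any lock
 *     wait on decomp_done_cond   <--  done = true, signal decomp_done_cond
 *
 * Doing the zlib work outside the locks is what lets the workers run in
 * parallel with each other and with the stream reader.
 */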
static void wait_for_decompress_done(void)
{
    int idx, thread_count;

    if (!migrate_use_compression()) {
        return;
    }

    thread_count = migrate_decompress_threads();
    qemu_mutex_lock(&decomp_done_lock);
    for (idx = 0; idx < thread_count; idx++) {
        while (!decomp_param[idx].done) {
            qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
        }
    }
    qemu_mutex_unlock(&decomp_done_lock);
}
void migrate_decompress_threads_create(void)
{
    int i, thread_count;

    thread_count = migrate_decompress_threads();
    decompress_threads = g_new0(QemuThread, thread_count);
    decomp_param = g_new0(DecompressParam, thread_count);
    qemu_mutex_init(&decomp_done_lock);
    qemu_cond_init(&decomp_done_cond);
    for (i = 0; i < thread_count; i++) {
        qemu_mutex_init(&decomp_param[i].mutex);
        qemu_cond_init(&decomp_param[i].cond);
        decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
        decomp_param[i].done = true;
        decomp_param[i].quit = false;
        qemu_thread_create(decompress_threads + i, "decompress",
                           do_data_decompress, decomp_param + i,
                           QEMU_THREAD_JOINABLE);
    }
}
void migrate_decompress_threads_join(void)
{
    int i, thread_count;

    thread_count = migrate_decompress_threads();
    for (i = 0; i < thread_count; i++) {
        qemu_mutex_lock(&decomp_param[i].mutex);
        decomp_param[i].quit = true;
        qemu_cond_signal(&decomp_param[i].cond);
        qemu_mutex_unlock(&decomp_param[i].mutex);
    }
    for (i = 0; i < thread_count; i++) {
        qemu_thread_join(decompress_threads + i);
        qemu_mutex_destroy(&decomp_param[i].mutex);
        qemu_cond_destroy(&decomp_param[i].cond);
        g_free(decomp_param[i].compbuf);
    }
    g_free(decompress_threads);
    g_free(decomp_param);
    decompress_threads = NULL;
    decomp_param = NULL;
}
static void decompress_data_with_multi_threads(QEMUFile *f,
                                               void *host, int len)
{
    int idx, thread_count;

    thread_count = migrate_decompress_threads();
    qemu_mutex_lock(&decomp_done_lock);
    while (true) {
        for (idx = 0; idx < thread_count; idx++) {
            if (decomp_param[idx].done) {
                decomp_param[idx].done = false;
                qemu_mutex_lock(&decomp_param[idx].mutex);
                qemu_get_buffer(f, decomp_param[idx].compbuf, len);
                decomp_param[idx].des = host;
                decomp_param[idx].len = len;
                qemu_cond_signal(&decomp_param[idx].cond);
                qemu_mutex_unlock(&decomp_param[idx].mutex);
                break;
            }
        }
        if (idx < thread_count) {
            break;
        } else {
            qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
        }
    }
    qemu_mutex_unlock(&decomp_done_lock);
}
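
/*
 * If no worker is idle, the caller blocks on decomp_done_cond until one
 * signals completion, so this function throttles the stream reader to
 * the aggregate decompression rate rather than queueing unboundedly.
 */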
/*
 * Allocate data structures etc needed by incoming migration with
 * postcopy-ram. postcopy-ram's similarly named
 * postcopy_ram_incoming_init does the work.
 */
int ram_postcopy_incoming_init(MigrationIncomingState *mis)
{
    size_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;

    return postcopy_ram_incoming_init(mis, ram_pages);
}
/*
 * Called in postcopy mode by ram_load().
 * rcu_read_lock is taken prior to this being called.
 */
static int ram_load_postcopy(QEMUFile *f)
{
    int flags = 0, ret = 0;
    bool place_needed = false;
    bool matching_page_sizes = qemu_host_page_size == TARGET_PAGE_SIZE;
    MigrationIncomingState *mis = migration_incoming_get_current();
    /* Temporary page that is later 'placed' */
    void *postcopy_host_page = postcopy_get_tmp_page(mis);
    void *last_host = NULL;
    bool all_zero = false;

    while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr;
        void *host = NULL;
        void *page_buffer = NULL;
        void *place_source = NULL;
        uint8_t ch;

        addr = qemu_get_be64(f);
        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        trace_ram_load_postcopy_loop((uint64_t)addr, flags);
        place_needed = false;
        if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE)) {
            RAMBlock *block = ram_block_from_stream(f, flags);

            host = host_from_ram_block_offset(block, addr);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            /*
             * Postcopy requires that we place whole host pages atomically.
             * To make it atomic, the data is read into a temporary page
             * that's moved into place later.
             * The migration protocol uses (possibly smaller) target pages;
             * however, the source ensures it always sends all the
             * components of a host page in order.
             */
            page_buffer = postcopy_host_page +
                          ((uintptr_t)host & ~qemu_host_page_mask);
            /* If all TPs (target pages) in this HP (host page) are zero
             * then we can optimise the place */
            if (!((uintptr_t)host & ~qemu_host_page_mask)) {
                all_zero = true;
            } else {
                /* not the 1st TP within the HP */
                if (host != (last_host + TARGET_PAGE_SIZE)) {
                    error_report("Non-sequential target page %p/%p",
                                 host, last_host);
                    ret = -EINVAL;
                    break;
                }
            }

            /*
             * If it's the last part of a host page then we place the host
             * page
             */
            place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
                            ~qemu_host_page_mask) == 0;
            place_source = postcopy_host_page;
        }
        last_host = host;

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_COMPRESS:
            ch = qemu_get_byte(f);
            memset(page_buffer, ch, TARGET_PAGE_SIZE);
            if (ch) {
                all_zero = false;
            }
            break;

        case RAM_SAVE_FLAG_PAGE:
            all_zero = false;
            if (!place_needed || !matching_page_sizes) {
                qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
            } else {
                /* Avoids the qemu_file copy during postcopy, which would
                 * otherwise copy the data again when placing the page;
                 * only possible when we do this read in one go
                 * (matching page sizes).
                 */
                qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
                                         TARGET_PAGE_SIZE);
            }
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            break;
        default:
            error_report("Unknown combination of migration flags: %#x"
                         " (postcopy mode)", flags);
            ret = -EINVAL;
        }

        if (place_needed) {
            /* This gets called at the last target page in the host page */
            if (all_zero) {
                ret = postcopy_place_page_zero(mis,
                                               host + TARGET_PAGE_SIZE -
                                               qemu_host_page_size);
            } else {
                ret = postcopy_place_page(mis, host + TARGET_PAGE_SIZE -
                                               qemu_host_page_size,
                                          place_source);
            }
        }
        if (!ret) {
            ret = qemu_file_get_error(f);
        }
    }

    return ret;
}
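
/*
 * Worked example of the assembly above, assuming 64K host pages built
 * from 4K target pages (both sizes are configuration-dependent): the
 * sixteen target pages of one host page arrive in order and accumulate
 * in postcopy_host_page; only the sixteenth sets place_needed, so the
 * userfaultfd-backed mapping goes from missing straight to fully
 * populated and the guest can never fault on a half-filled page.
 */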
static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    int flags = 0, ret = 0;
    static uint64_t seq_iter;
    int len = 0;
    /*
     * If the system is running in postcopy mode, page inserts to host
     * memory must be atomic.
     */
    bool postcopy_running = postcopy_state_get() >= POSTCOPY_INCOMING_LISTENING;

    seq_iter++;

    if (version_id != 4) {
        ret = -EINVAL;
    }

    /* This RCU critical section can be very long running.
     * If RCU reclaims in this code path start to become numerous,
     * it will be necessary to reduce the granularity of this
     * critical section.
     */
    rcu_read_lock();

    if (postcopy_running) {
        ret = ram_load_postcopy(f);
    }

    while (!postcopy_running && !ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr, total_ram_bytes;
        void *host = NULL;
        uint8_t ch;

        addr = qemu_get_be64(f);
        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE |
                     RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
            RAMBlock *block = ram_block_from_stream(f, flags);

            host = host_from_ram_block_offset(block, addr);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
        }

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_MEM_SIZE:
            /* Synchronize RAM block list */
            total_ram_bytes = addr;
            while (!ret && total_ram_bytes) {
                RAMBlock *block;
                char id[256];
                ram_addr_t length;

                len = qemu_get_byte(f);
                qemu_get_buffer(f, (uint8_t *)id, len);
                id[len] = 0;
                length = qemu_get_be64(f);

                block = qemu_ram_block_by_name(id);
                if (block) {
                    if (length != block->used_length) {
                        Error *local_err = NULL;

                        ret = qemu_ram_resize(block, length,
                                              &local_err);
                        if (local_err) {
                            error_report_err(local_err);
                        }
                    }
                    ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
                                          block->idstr);
                } else {
                    error_report("Unknown ramblock \"%s\", cannot "
                                 "accept migration", id);
                    ret = -EINVAL;
                }

                total_ram_bytes -= length;
            }
            break;

        case RAM_SAVE_FLAG_COMPRESS:
            ch = qemu_get_byte(f);
            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
            break;

        case RAM_SAVE_FLAG_PAGE:
            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
            break;

        case RAM_SAVE_FLAG_COMPRESS_PAGE:
            len = qemu_get_be32(f);
            if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
                error_report("Invalid compressed data length: %d", len);
                ret = -EINVAL;
                break;
            }
            decompress_data_with_multi_threads(f, host, len);
            break;

        case RAM_SAVE_FLAG_XBZRLE:
            if (load_xbzrle(f, addr, host) < 0) {
                error_report("Failed to decompress XBZRLE page at "
                             RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            break;
        default:
            if (flags & RAM_SAVE_FLAG_HOOK) {
                ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
            } else {
                error_report("Unknown combination of migration flags: %#x",
                             flags);
                ret = -EINVAL;
            }
        }
        if (!ret) {
            ret = qemu_file_get_error(f);
        }
    }

    wait_for_decompress_done();
    rcu_read_unlock();
    trace_ram_load_complete(ret, seq_iter);
    return ret;
}
static SaveVMHandlers savevm_ram_handlers = {
    .save_live_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete_postcopy = ram_save_complete,
    .save_live_complete_precopy = ram_save_complete,
    .save_live_pending = ram_save_pending,
    .load_state = ram_load,
    .cleanup = ram_migration_cleanup,
};

void ram_mig_init(void)
{
    qemu_mutex_init(&XBZRLE.lock);
    register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL);
}
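
/*
 * ram_mig_init() is expected to be called exactly once during startup,
 * before any migration can begin, so that the XBZRLE lock exists before
 * cache resizes can race with a running migration, and so that the
 * "ram" section is registered at version 4, matching the version_id
 * check in ram_load().
 */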