4 * Copyright (c) 2003-2008 Fabrice Bellard
5 * Copyright (c) 2011-2015 Red Hat Inc
8 * Juan Quintela <quintela@redhat.com>
10 * Permission is hereby granted, free of charge, to any person obtaining a copy
11 * of this software and associated documentation files (the "Software"), to deal
12 * in the Software without restriction, including without limitation the rights
13 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the Software is
15 * furnished to do so, subject to the following conditions:
17 * The above copyright notice and this permission notice shall be included in
18 * all copies or substantial portions of the Software.
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
29 #include "qemu/osdep.h"
32 #include "qemu/cutils.h"
33 #include "qemu/bitops.h"
34 #include "qemu/bitmap.h"
35 #include "qemu/main-loop.h"
38 #include "migration.h"
40 #include "migration/register.h"
41 #include "migration/misc.h"
42 #include "qemu-file.h"
43 #include "postcopy-ram.h"
44 #include "page_cache.h"
45 #include "qemu/error-report.h"
46 #include "qapi/error.h"
47 #include "qapi/qapi-events-migration.h"
48 #include "qapi/qmp/qerror.h"
50 #include "exec/ram_addr.h"
51 #include "exec/target_page.h"
52 #include "qemu/rcu_queue.h"
53 #include "migration/colo.h"
55 #include "sysemu/sysemu.h"
56 #include "qemu/uuid.h"
59 /***********************************************************/
60 /* ram save/restore */
62 /* RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS, it
63 * worked for pages that where filled with the same char. We switched
64 * it to only search for the zero value. And to avoid confusion with
65 * RAM_SSAVE_FLAG_COMPRESS_PAGE just rename it.
68 #define RAM_SAVE_FLAG_FULL 0x01 /* Obsolete, not used anymore */
69 #define RAM_SAVE_FLAG_ZERO 0x02
70 #define RAM_SAVE_FLAG_MEM_SIZE 0x04
71 #define RAM_SAVE_FLAG_PAGE 0x08
72 #define RAM_SAVE_FLAG_EOS 0x10
73 #define RAM_SAVE_FLAG_CONTINUE 0x20
74 #define RAM_SAVE_FLAG_XBZRLE 0x40
75 /* 0x80 is reserved in migration.h start with 0x100 next */
76 #define RAM_SAVE_FLAG_COMPRESS_PAGE 0x100
78 static inline bool is_zero_range(uint8_t *p
, uint64_t size
)
80 return buffer_is_zero(p
, size
);
83 XBZRLECacheStats xbzrle_counters
;
85 /* struct contains XBZRLE cache and a static page
86 used by the compression */
88 /* buffer used for XBZRLE encoding */
90 /* buffer for storing page content */
92 /* Cache for XBZRLE, Protected by lock. */
95 /* it will store a page full of zeros */
96 uint8_t *zero_target_page
;
97 /* buffer used for XBZRLE decoding */
101 static void XBZRLE_cache_lock(void)
103 if (migrate_use_xbzrle())
104 qemu_mutex_lock(&XBZRLE
.lock
);
107 static void XBZRLE_cache_unlock(void)
109 if (migrate_use_xbzrle())
110 qemu_mutex_unlock(&XBZRLE
.lock
);
114 * xbzrle_cache_resize: resize the xbzrle cache
116 * This function is called from qmp_migrate_set_cache_size in main
117 * thread, possibly while a migration is in progress. A running
118 * migration may be using the cache and might finish during this call,
119 * hence changes to the cache are protected by XBZRLE.lock().
121 * Returns 0 for success or -1 for error
123 * @new_size: new cache size
124 * @errp: set *errp if the check failed, with reason
126 int xbzrle_cache_resize(int64_t new_size
, Error
**errp
)
128 PageCache
*new_cache
;
131 /* Check for truncation */
132 if (new_size
!= (size_t)new_size
) {
133 error_setg(errp
, QERR_INVALID_PARAMETER_VALUE
, "cache size",
134 "exceeding address space");
138 if (new_size
== migrate_xbzrle_cache_size()) {
145 if (XBZRLE
.cache
!= NULL
) {
146 new_cache
= cache_init(new_size
, TARGET_PAGE_SIZE
, errp
);
152 cache_fini(XBZRLE
.cache
);
153 XBZRLE
.cache
= new_cache
;
156 XBZRLE_cache_unlock();
160 /* Should be holding either ram_list.mutex, or the RCU lock. */
161 #define RAMBLOCK_FOREACH_MIGRATABLE(block) \
162 INTERNAL_RAMBLOCK_FOREACH(block) \
163 if (!qemu_ram_is_migratable(block)) {} else
165 #undef RAMBLOCK_FOREACH
167 static void ramblock_recv_map_init(void)
171 RAMBLOCK_FOREACH_MIGRATABLE(rb
) {
172 assert(!rb
->receivedmap
);
173 rb
->receivedmap
= bitmap_new(rb
->max_length
>> qemu_target_page_bits());
177 int ramblock_recv_bitmap_test(RAMBlock
*rb
, void *host_addr
)
179 return test_bit(ramblock_recv_bitmap_offset(host_addr
, rb
),
183 bool ramblock_recv_bitmap_test_byte_offset(RAMBlock
*rb
, uint64_t byte_offset
)
185 return test_bit(byte_offset
>> TARGET_PAGE_BITS
, rb
->receivedmap
);
188 void ramblock_recv_bitmap_set(RAMBlock
*rb
, void *host_addr
)
190 set_bit_atomic(ramblock_recv_bitmap_offset(host_addr
, rb
), rb
->receivedmap
);
193 void ramblock_recv_bitmap_set_range(RAMBlock
*rb
, void *host_addr
,
196 bitmap_set_atomic(rb
->receivedmap
,
197 ramblock_recv_bitmap_offset(host_addr
, rb
),
201 #define RAMBLOCK_RECV_BITMAP_ENDING (0x0123456789abcdefULL)
204 * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
206 * Returns >0 if success with sent bytes, or <0 if error.
208 int64_t ramblock_recv_bitmap_send(QEMUFile
*file
,
209 const char *block_name
)
211 RAMBlock
*block
= qemu_ram_block_by_name(block_name
);
212 unsigned long *le_bitmap
, nbits
;
216 error_report("%s: invalid block name: %s", __func__
, block_name
);
220 nbits
= block
->used_length
>> TARGET_PAGE_BITS
;
223 * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
224 * machines we may need 4 more bytes for padding (see below
225 * comment). So extend it a bit before hand.
227 le_bitmap
= bitmap_new(nbits
+ BITS_PER_LONG
);
230 * Always use little endian when sending the bitmap. This is
231 * required that when source and destination VMs are not using the
232 * same endianess. (Note: big endian won't work.)
234 bitmap_to_le(le_bitmap
, block
->receivedmap
, nbits
);
236 /* Size of the bitmap, in bytes */
240 * size is always aligned to 8 bytes for 64bit machines, but it
241 * may not be true for 32bit machines. We need this padding to
242 * make sure the migration can survive even between 32bit and
245 size
= ROUND_UP(size
, 8);
247 qemu_put_be64(file
, size
);
248 qemu_put_buffer(file
, (const uint8_t *)le_bitmap
, size
);
250 * Mark as an end, in case the middle part is screwed up due to
251 * some "misterious" reason.
253 qemu_put_be64(file
, RAMBLOCK_RECV_BITMAP_ENDING
);
258 if (qemu_file_get_error(file
)) {
259 return qemu_file_get_error(file
);
262 return size
+ sizeof(size
);
266 * An outstanding page request, on the source, having been received
269 struct RAMSrcPageRequest
{
274 QSIMPLEQ_ENTRY(RAMSrcPageRequest
) next_req
;
277 /* State of RAM for migration */
279 /* QEMUFile used for this migration */
281 /* Last block that we have visited searching for dirty pages */
282 RAMBlock
*last_seen_block
;
283 /* Last block from where we have sent data */
284 RAMBlock
*last_sent_block
;
285 /* Last dirty target page we have sent */
286 ram_addr_t last_page
;
287 /* last ram version we have seen */
288 uint32_t last_version
;
289 /* We are in the first round */
291 /* How many times we have dirty too many pages */
292 int dirty_rate_high_cnt
;
293 /* these variables are used for bitmap sync */
294 /* last time we did a full bitmap_sync */
295 int64_t time_last_bitmap_sync
;
296 /* bytes transferred at start_time */
297 uint64_t bytes_xfer_prev
;
298 /* number of dirty pages since start_time */
299 uint64_t num_dirty_pages_period
;
300 /* xbzrle misses since the beginning of the period */
301 uint64_t xbzrle_cache_miss_prev
;
302 /* number of iterations at the beginning of period */
303 uint64_t iterations_prev
;
304 /* Iterations since start */
306 /* number of dirty bits in the bitmap */
307 uint64_t migration_dirty_pages
;
308 /* protects modification of the bitmap */
309 QemuMutex bitmap_mutex
;
310 /* The RAMBlock used in the last src_page_requests */
311 RAMBlock
*last_req_rb
;
312 /* Queue of outstanding page requests from the destination */
313 QemuMutex src_page_req_mutex
;
314 QSIMPLEQ_HEAD(src_page_requests
, RAMSrcPageRequest
) src_page_requests
;
316 typedef struct RAMState RAMState
;
318 static RAMState
*ram_state
;
320 uint64_t ram_bytes_remaining(void)
322 return ram_state
? (ram_state
->migration_dirty_pages
* TARGET_PAGE_SIZE
) :
326 MigrationStats ram_counters
;
328 /* used by the search for pages to send */
329 struct PageSearchStatus
{
330 /* Current block being searched */
332 /* Current page to search from */
334 /* Set once we wrap around */
337 typedef struct PageSearchStatus PageSearchStatus
;
339 struct CompressParam
{
348 /* internally used fields */
352 typedef struct CompressParam CompressParam
;
354 struct DecompressParam
{
364 typedef struct DecompressParam DecompressParam
;
366 static CompressParam
*comp_param
;
367 static QemuThread
*compress_threads
;
368 /* comp_done_cond is used to wake up the migration thread when
369 * one of the compression threads has finished the compression.
370 * comp_done_lock is used to co-work with comp_done_cond.
372 static QemuMutex comp_done_lock
;
373 static QemuCond comp_done_cond
;
374 /* The empty QEMUFileOps will be used by file in CompressParam */
375 static const QEMUFileOps empty_ops
= { };
377 static QEMUFile
*decomp_file
;
378 static DecompressParam
*decomp_param
;
379 static QemuThread
*decompress_threads
;
380 static QemuMutex decomp_done_lock
;
381 static QemuCond decomp_done_cond
;
383 static int do_compress_ram_page(QEMUFile
*f
, z_stream
*stream
, RAMBlock
*block
,
384 ram_addr_t offset
, uint8_t *source_buf
);
386 static void *do_data_compress(void *opaque
)
388 CompressParam
*param
= opaque
;
392 qemu_mutex_lock(¶m
->mutex
);
393 while (!param
->quit
) {
395 block
= param
->block
;
396 offset
= param
->offset
;
398 qemu_mutex_unlock(¶m
->mutex
);
400 do_compress_ram_page(param
->file
, ¶m
->stream
, block
, offset
,
403 qemu_mutex_lock(&comp_done_lock
);
405 qemu_cond_signal(&comp_done_cond
);
406 qemu_mutex_unlock(&comp_done_lock
);
408 qemu_mutex_lock(¶m
->mutex
);
410 qemu_cond_wait(¶m
->cond
, ¶m
->mutex
);
413 qemu_mutex_unlock(¶m
->mutex
);
418 static inline void terminate_compression_threads(void)
420 int idx
, thread_count
;
422 thread_count
= migrate_compress_threads();
424 for (idx
= 0; idx
< thread_count
; idx
++) {
425 qemu_mutex_lock(&comp_param
[idx
].mutex
);
426 comp_param
[idx
].quit
= true;
427 qemu_cond_signal(&comp_param
[idx
].cond
);
428 qemu_mutex_unlock(&comp_param
[idx
].mutex
);
432 static void compress_threads_save_cleanup(void)
436 if (!migrate_use_compression()) {
439 terminate_compression_threads();
440 thread_count
= migrate_compress_threads();
441 for (i
= 0; i
< thread_count
; i
++) {
443 * we use it as a indicator which shows if the thread is
444 * properly init'd or not
446 if (!comp_param
[i
].file
) {
449 qemu_thread_join(compress_threads
+ i
);
450 qemu_mutex_destroy(&comp_param
[i
].mutex
);
451 qemu_cond_destroy(&comp_param
[i
].cond
);
452 deflateEnd(&comp_param
[i
].stream
);
453 g_free(comp_param
[i
].originbuf
);
454 qemu_fclose(comp_param
[i
].file
);
455 comp_param
[i
].file
= NULL
;
457 qemu_mutex_destroy(&comp_done_lock
);
458 qemu_cond_destroy(&comp_done_cond
);
459 g_free(compress_threads
);
461 compress_threads
= NULL
;
465 static int compress_threads_save_setup(void)
469 if (!migrate_use_compression()) {
472 thread_count
= migrate_compress_threads();
473 compress_threads
= g_new0(QemuThread
, thread_count
);
474 comp_param
= g_new0(CompressParam
, thread_count
);
475 qemu_cond_init(&comp_done_cond
);
476 qemu_mutex_init(&comp_done_lock
);
477 for (i
= 0; i
< thread_count
; i
++) {
478 comp_param
[i
].originbuf
= g_try_malloc(TARGET_PAGE_SIZE
);
479 if (!comp_param
[i
].originbuf
) {
483 if (deflateInit(&comp_param
[i
].stream
,
484 migrate_compress_level()) != Z_OK
) {
485 g_free(comp_param
[i
].originbuf
);
489 /* comp_param[i].file is just used as a dummy buffer to save data,
490 * set its ops to empty.
492 comp_param
[i
].file
= qemu_fopen_ops(NULL
, &empty_ops
);
493 comp_param
[i
].done
= true;
494 comp_param
[i
].quit
= false;
495 qemu_mutex_init(&comp_param
[i
].mutex
);
496 qemu_cond_init(&comp_param
[i
].cond
);
497 qemu_thread_create(compress_threads
+ i
, "compress",
498 do_data_compress
, comp_param
+ i
,
499 QEMU_THREAD_JOINABLE
);
504 compress_threads_save_cleanup();
510 #define MULTIFD_MAGIC 0x11223344U
511 #define MULTIFD_VERSION 1
516 unsigned char uuid
[16]; /* QemuUUID */
518 } __attribute__((packed
)) MultiFDInit_t
;
529 } __attribute__((packed
)) MultiFDPacket_t
;
532 /* number of used pages */
534 /* number of allocated pages */
536 /* global number of generated multifd packets */
538 /* offset of each page */
540 /* pointer to each page */
546 /* this fields are not changed once the thread is created */
549 /* channel thread name */
551 /* channel thread id */
553 /* communication channel */
555 /* sem where to wait for more work */
557 /* this mutex protects the following parameters */
559 /* is this channel thread running */
561 /* should this thread finish */
563 /* array of pages to sent */
564 MultiFDPages_t
*pages
;
565 /* packet allocated len */
567 /* pointer to the packet */
568 MultiFDPacket_t
*packet
;
569 /* multifd flags for each packet */
571 /* global number of generated multifd packets */
576 /* this fields are not changed once the thread is created */
579 /* channel thread name */
581 /* channel thread id */
583 /* communication channel */
585 /* sem where to wait for more work */
587 /* this mutex protects the following parameters */
589 /* is this channel thread running */
591 /* should this thread finish */
593 /* array of pages to receive */
594 MultiFDPages_t
*pages
;
595 /* packet allocated len */
597 /* pointer to the packet */
598 MultiFDPacket_t
*packet
;
599 /* multifd flags for each packet */
601 /* global number of generated multifd packets */
605 static int multifd_send_initial_packet(MultiFDSendParams
*p
, Error
**errp
)
610 msg
.magic
= cpu_to_be32(MULTIFD_MAGIC
);
611 msg
.version
= cpu_to_be32(MULTIFD_VERSION
);
613 memcpy(msg
.uuid
, &qemu_uuid
.data
, sizeof(msg
.uuid
));
615 ret
= qio_channel_write_all(p
->c
, (char *)&msg
, sizeof(msg
), errp
);
622 static int multifd_recv_initial_packet(QIOChannel
*c
, Error
**errp
)
627 ret
= qio_channel_read_all(c
, (char *)&msg
, sizeof(msg
), errp
);
632 be32_to_cpus(&msg
.magic
);
633 be32_to_cpus(&msg
.version
);
635 if (msg
.magic
!= MULTIFD_MAGIC
) {
636 error_setg(errp
, "multifd: received packet magic %x "
637 "expected %x", msg
.magic
, MULTIFD_MAGIC
);
641 if (msg
.version
!= MULTIFD_VERSION
) {
642 error_setg(errp
, "multifd: received packet version %d "
643 "expected %d", msg
.version
, MULTIFD_VERSION
);
647 if (memcmp(msg
.uuid
, &qemu_uuid
, sizeof(qemu_uuid
))) {
648 char *uuid
= qemu_uuid_unparse_strdup(&qemu_uuid
);
649 char *msg_uuid
= qemu_uuid_unparse_strdup((const QemuUUID
*)msg
.uuid
);
651 error_setg(errp
, "multifd: received uuid '%s' and expected "
652 "uuid '%s' for channel %hhd", msg_uuid
, uuid
, msg
.id
);
658 if (msg
.id
> migrate_multifd_channels()) {
659 error_setg(errp
, "multifd: received channel version %d "
660 "expected %d", msg
.version
, MULTIFD_VERSION
);
667 static MultiFDPages_t
*multifd_pages_init(size_t size
)
669 MultiFDPages_t
*pages
= g_new0(MultiFDPages_t
, 1);
671 pages
->allocated
= size
;
672 pages
->iov
= g_new0(struct iovec
, size
);
673 pages
->offset
= g_new0(ram_addr_t
, size
);
678 static void multifd_pages_clear(MultiFDPages_t
*pages
)
681 pages
->allocated
= 0;
682 pages
->packet_num
= 0;
686 g_free(pages
->offset
);
687 pages
->offset
= NULL
;
691 static void multifd_send_fill_packet(MultiFDSendParams
*p
)
693 MultiFDPacket_t
*packet
= p
->packet
;
696 packet
->magic
= cpu_to_be32(MULTIFD_MAGIC
);
697 packet
->version
= cpu_to_be32(MULTIFD_VERSION
);
698 packet
->flags
= cpu_to_be32(p
->flags
);
699 packet
->size
= cpu_to_be32(migrate_multifd_page_count());
700 packet
->used
= cpu_to_be32(p
->pages
->used
);
701 packet
->packet_num
= cpu_to_be64(p
->packet_num
);
703 if (p
->pages
->block
) {
704 strncpy(packet
->ramblock
, p
->pages
->block
->idstr
, 256);
707 for (i
= 0; i
< p
->pages
->used
; i
++) {
708 packet
->offset
[i
] = cpu_to_be64(p
->pages
->offset
[i
]);
712 static int multifd_recv_unfill_packet(MultiFDRecvParams
*p
, Error
**errp
)
714 MultiFDPacket_t
*packet
= p
->packet
;
718 /* ToDo: We can't use it until we haven't received a message */
721 be32_to_cpus(&packet
->magic
);
722 if (packet
->magic
!= MULTIFD_MAGIC
) {
723 error_setg(errp
, "multifd: received packet "
724 "magic %x and expected magic %x",
725 packet
->magic
, MULTIFD_MAGIC
);
729 be32_to_cpus(&packet
->version
);
730 if (packet
->version
!= MULTIFD_VERSION
) {
731 error_setg(errp
, "multifd: received packet "
732 "version %d and expected version %d",
733 packet
->version
, MULTIFD_VERSION
);
737 p
->flags
= be32_to_cpu(packet
->flags
);
739 be32_to_cpus(&packet
->size
);
740 if (packet
->size
> migrate_multifd_page_count()) {
741 error_setg(errp
, "multifd: received packet "
742 "with size %d and expected maximum size %d",
743 packet
->size
, migrate_multifd_page_count()) ;
747 p
->pages
->used
= be32_to_cpu(packet
->used
);
748 if (p
->pages
->used
> packet
->size
) {
749 error_setg(errp
, "multifd: received packet "
750 "with size %d and expected maximum size %d",
751 p
->pages
->used
, packet
->size
) ;
755 p
->packet_num
= be64_to_cpu(packet
->packet_num
);
757 if (p
->pages
->used
) {
758 /* make sure that ramblock is 0 terminated */
759 packet
->ramblock
[255] = 0;
760 block
= qemu_ram_block_by_name(packet
->ramblock
);
762 error_setg(errp
, "multifd: unknown ram block %s",
768 for (i
= 0; i
< p
->pages
->used
; i
++) {
769 ram_addr_t offset
= be64_to_cpu(packet
->offset
[i
]);
771 if (offset
> (block
->used_length
- TARGET_PAGE_SIZE
)) {
772 error_setg(errp
, "multifd: offset too long " RAM_ADDR_FMT
773 " (max " RAM_ADDR_FMT
")",
774 offset
, block
->max_length
);
777 p
->pages
->iov
[i
].iov_base
= block
->host
+ offset
;
778 p
->pages
->iov
[i
].iov_len
= TARGET_PAGE_SIZE
;
785 MultiFDSendParams
*params
;
786 /* number of created threads */
788 /* array of pages to sent */
789 MultiFDPages_t
*pages
;
790 } *multifd_send_state
;
792 static void multifd_send_terminate_threads(Error
*err
)
797 MigrationState
*s
= migrate_get_current();
798 migrate_set_error(s
, err
);
799 if (s
->state
== MIGRATION_STATUS_SETUP
||
800 s
->state
== MIGRATION_STATUS_PRE_SWITCHOVER
||
801 s
->state
== MIGRATION_STATUS_DEVICE
||
802 s
->state
== MIGRATION_STATUS_ACTIVE
) {
803 migrate_set_state(&s
->state
, s
->state
,
804 MIGRATION_STATUS_FAILED
);
808 for (i
= 0; i
< migrate_multifd_channels(); i
++) {
809 MultiFDSendParams
*p
= &multifd_send_state
->params
[i
];
811 qemu_mutex_lock(&p
->mutex
);
813 qemu_sem_post(&p
->sem
);
814 qemu_mutex_unlock(&p
->mutex
);
818 int multifd_save_cleanup(Error
**errp
)
823 if (!migrate_use_multifd()) {
826 multifd_send_terminate_threads(NULL
);
827 for (i
= 0; i
< migrate_multifd_channels(); i
++) {
828 MultiFDSendParams
*p
= &multifd_send_state
->params
[i
];
831 qemu_thread_join(&p
->thread
);
833 socket_send_channel_destroy(p
->c
);
835 qemu_mutex_destroy(&p
->mutex
);
836 qemu_sem_destroy(&p
->sem
);
839 multifd_pages_clear(p
->pages
);
845 g_free(multifd_send_state
->params
);
846 multifd_send_state
->params
= NULL
;
847 multifd_pages_clear(multifd_send_state
->pages
);
848 multifd_send_state
->pages
= NULL
;
849 g_free(multifd_send_state
);
850 multifd_send_state
= NULL
;
854 static void *multifd_send_thread(void *opaque
)
856 MultiFDSendParams
*p
= opaque
;
857 Error
*local_err
= NULL
;
859 if (multifd_send_initial_packet(p
, &local_err
) < 0) {
864 qemu_mutex_lock(&p
->mutex
);
865 multifd_send_fill_packet(p
);
867 qemu_mutex_unlock(&p
->mutex
);
870 qemu_mutex_unlock(&p
->mutex
);
871 qemu_sem_wait(&p
->sem
);
876 multifd_send_terminate_threads(local_err
);
879 qemu_mutex_lock(&p
->mutex
);
881 qemu_mutex_unlock(&p
->mutex
);
886 static void multifd_new_send_channel_async(QIOTask
*task
, gpointer opaque
)
888 MultiFDSendParams
*p
= opaque
;
889 QIOChannel
*sioc
= QIO_CHANNEL(qio_task_get_source(task
));
890 Error
*local_err
= NULL
;
892 if (qio_task_propagate_error(task
, &local_err
)) {
893 if (multifd_save_cleanup(&local_err
) != 0) {
894 migrate_set_error(migrate_get_current(), local_err
);
897 p
->c
= QIO_CHANNEL(sioc
);
898 qio_channel_set_delay(p
->c
, false);
900 qemu_thread_create(&p
->thread
, p
->name
, multifd_send_thread
, p
,
901 QEMU_THREAD_JOINABLE
);
903 atomic_inc(&multifd_send_state
->count
);
907 int multifd_save_setup(void)
910 uint32_t page_count
= migrate_multifd_page_count();
913 if (!migrate_use_multifd()) {
916 thread_count
= migrate_multifd_channels();
917 multifd_send_state
= g_malloc0(sizeof(*multifd_send_state
));
918 multifd_send_state
->params
= g_new0(MultiFDSendParams
, thread_count
);
919 atomic_set(&multifd_send_state
->count
, 0);
920 multifd_send_state
->pages
= multifd_pages_init(page_count
);
922 for (i
= 0; i
< thread_count
; i
++) {
923 MultiFDSendParams
*p
= &multifd_send_state
->params
[i
];
925 qemu_mutex_init(&p
->mutex
);
926 qemu_sem_init(&p
->sem
, 0);
929 p
->pages
= multifd_pages_init(page_count
);
930 p
->packet_len
= sizeof(MultiFDPacket_t
)
931 + sizeof(ram_addr_t
) * page_count
;
932 p
->packet
= g_malloc0(p
->packet_len
);
933 p
->name
= g_strdup_printf("multifdsend_%d", i
);
934 socket_send_channel_create(multifd_new_send_channel_async
, p
);
940 MultiFDRecvParams
*params
;
941 /* number of created threads */
943 } *multifd_recv_state
;
945 static void multifd_recv_terminate_threads(Error
*err
)
950 MigrationState
*s
= migrate_get_current();
951 migrate_set_error(s
, err
);
952 if (s
->state
== MIGRATION_STATUS_SETUP
||
953 s
->state
== MIGRATION_STATUS_ACTIVE
) {
954 migrate_set_state(&s
->state
, s
->state
,
955 MIGRATION_STATUS_FAILED
);
959 for (i
= 0; i
< migrate_multifd_channels(); i
++) {
960 MultiFDRecvParams
*p
= &multifd_recv_state
->params
[i
];
962 qemu_mutex_lock(&p
->mutex
);
964 qemu_sem_post(&p
->sem
);
965 qemu_mutex_unlock(&p
->mutex
);
969 int multifd_load_cleanup(Error
**errp
)
974 if (!migrate_use_multifd()) {
977 multifd_recv_terminate_threads(NULL
);
978 for (i
= 0; i
< migrate_multifd_channels(); i
++) {
979 MultiFDRecvParams
*p
= &multifd_recv_state
->params
[i
];
982 qemu_thread_join(&p
->thread
);
984 object_unref(OBJECT(p
->c
));
986 qemu_mutex_destroy(&p
->mutex
);
987 qemu_sem_destroy(&p
->sem
);
990 multifd_pages_clear(p
->pages
);
996 g_free(multifd_recv_state
->params
);
997 multifd_recv_state
->params
= NULL
;
998 g_free(multifd_recv_state
);
999 multifd_recv_state
= NULL
;
1004 static void *multifd_recv_thread(void *opaque
)
1006 MultiFDRecvParams
*p
= opaque
;
1007 Error
*local_err
= NULL
;
1011 qemu_mutex_lock(&p
->mutex
);
1013 /* ToDo: Packet reception goes here */
1015 ret
= multifd_recv_unfill_packet(p
, &local_err
);
1016 qemu_mutex_unlock(&p
->mutex
);
1020 } else if (p
->quit
) {
1021 qemu_mutex_unlock(&p
->mutex
);
1024 qemu_mutex_unlock(&p
->mutex
);
1025 qemu_sem_wait(&p
->sem
);
1028 qemu_mutex_lock(&p
->mutex
);
1030 qemu_mutex_unlock(&p
->mutex
);
1035 int multifd_load_setup(void)
1038 uint32_t page_count
= migrate_multifd_page_count();
1041 if (!migrate_use_multifd()) {
1044 thread_count
= migrate_multifd_channels();
1045 multifd_recv_state
= g_malloc0(sizeof(*multifd_recv_state
));
1046 multifd_recv_state
->params
= g_new0(MultiFDRecvParams
, thread_count
);
1047 atomic_set(&multifd_recv_state
->count
, 0);
1049 for (i
= 0; i
< thread_count
; i
++) {
1050 MultiFDRecvParams
*p
= &multifd_recv_state
->params
[i
];
1052 qemu_mutex_init(&p
->mutex
);
1053 qemu_sem_init(&p
->sem
, 0);
1056 p
->pages
= multifd_pages_init(page_count
);
1057 p
->packet_len
= sizeof(MultiFDPacket_t
)
1058 + sizeof(ram_addr_t
) * page_count
;
1059 p
->packet
= g_malloc0(p
->packet_len
);
1060 p
->name
= g_strdup_printf("multifdrecv_%d", i
);
1065 bool multifd_recv_all_channels_created(void)
1067 int thread_count
= migrate_multifd_channels();
1069 if (!migrate_use_multifd()) {
1073 return thread_count
== atomic_read(&multifd_recv_state
->count
);
1076 void multifd_recv_new_channel(QIOChannel
*ioc
)
1078 MultiFDRecvParams
*p
;
1079 Error
*local_err
= NULL
;
1082 id
= multifd_recv_initial_packet(ioc
, &local_err
);
1084 multifd_recv_terminate_threads(local_err
);
1088 p
= &multifd_recv_state
->params
[id
];
1090 error_setg(&local_err
, "multifd: received id '%d' already setup'",
1092 multifd_recv_terminate_threads(local_err
);
1096 object_ref(OBJECT(ioc
));
1099 qemu_thread_create(&p
->thread
, p
->name
, multifd_recv_thread
, p
,
1100 QEMU_THREAD_JOINABLE
);
1101 atomic_inc(&multifd_recv_state
->count
);
1102 if (multifd_recv_state
->count
== migrate_multifd_channels()) {
1103 migration_incoming_process();
1108 * save_page_header: write page header to wire
1110 * If this is the 1st block, it also writes the block identification
1112 * Returns the number of bytes written
1114 * @f: QEMUFile where to send the data
1115 * @block: block that contains the page we want to send
1116 * @offset: offset inside the block for the page
1117 * in the lower bits, it contains flags
1119 static size_t save_page_header(RAMState
*rs
, QEMUFile
*f
, RAMBlock
*block
,
1124 if (block
== rs
->last_sent_block
) {
1125 offset
|= RAM_SAVE_FLAG_CONTINUE
;
1127 qemu_put_be64(f
, offset
);
1130 if (!(offset
& RAM_SAVE_FLAG_CONTINUE
)) {
1131 len
= strlen(block
->idstr
);
1132 qemu_put_byte(f
, len
);
1133 qemu_put_buffer(f
, (uint8_t *)block
->idstr
, len
);
1135 rs
->last_sent_block
= block
;
1141 * mig_throttle_guest_down: throotle down the guest
1143 * Reduce amount of guest cpu execution to hopefully slow down memory
1144 * writes. If guest dirty memory rate is reduced below the rate at
1145 * which we can transfer pages to the destination then we should be
1146 * able to complete migration. Some workloads dirty memory way too
1147 * fast and will not effectively converge, even with auto-converge.
1149 static void mig_throttle_guest_down(void)
1151 MigrationState
*s
= migrate_get_current();
1152 uint64_t pct_initial
= s
->parameters
.cpu_throttle_initial
;
1153 uint64_t pct_icrement
= s
->parameters
.cpu_throttle_increment
;
1155 /* We have not started throttling yet. Let's start it. */
1156 if (!cpu_throttle_active()) {
1157 cpu_throttle_set(pct_initial
);
1159 /* Throttling already on, just increase the rate */
1160 cpu_throttle_set(cpu_throttle_get_percentage() + pct_icrement
);
1165 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
1167 * @rs: current RAM state
1168 * @current_addr: address for the zero page
1170 * Update the xbzrle cache to reflect a page that's been sent as all 0.
1171 * The important thing is that a stale (not-yet-0'd) page be replaced
1173 * As a bonus, if the page wasn't in the cache it gets added so that
1174 * when a small write is made into the 0'd page it gets XBZRLE sent.
1176 static void xbzrle_cache_zero_page(RAMState
*rs
, ram_addr_t current_addr
)
1178 if (rs
->ram_bulk_stage
|| !migrate_use_xbzrle()) {
1182 /* We don't care if this fails to allocate a new cache page
1183 * as long as it updated an old one */
1184 cache_insert(XBZRLE
.cache
, current_addr
, XBZRLE
.zero_target_page
,
1185 ram_counters
.dirty_sync_count
);
1188 #define ENCODING_FLAG_XBZRLE 0x1
1191 * save_xbzrle_page: compress and send current page
1193 * Returns: 1 means that we wrote the page
1194 * 0 means that page is identical to the one already sent
1195 * -1 means that xbzrle would be longer than normal
1197 * @rs: current RAM state
1198 * @current_data: pointer to the address of the page contents
1199 * @current_addr: addr of the page
1200 * @block: block that contains the page we want to send
1201 * @offset: offset inside the block for the page
1202 * @last_stage: if we are at the completion stage
1204 static int save_xbzrle_page(RAMState
*rs
, uint8_t **current_data
,
1205 ram_addr_t current_addr
, RAMBlock
*block
,
1206 ram_addr_t offset
, bool last_stage
)
1208 int encoded_len
= 0, bytes_xbzrle
;
1209 uint8_t *prev_cached_page
;
1211 if (!cache_is_cached(XBZRLE
.cache
, current_addr
,
1212 ram_counters
.dirty_sync_count
)) {
1213 xbzrle_counters
.cache_miss
++;
1215 if (cache_insert(XBZRLE
.cache
, current_addr
, *current_data
,
1216 ram_counters
.dirty_sync_count
) == -1) {
1219 /* update *current_data when the page has been
1220 inserted into cache */
1221 *current_data
= get_cached_data(XBZRLE
.cache
, current_addr
);
1227 prev_cached_page
= get_cached_data(XBZRLE
.cache
, current_addr
);
1229 /* save current buffer into memory */
1230 memcpy(XBZRLE
.current_buf
, *current_data
, TARGET_PAGE_SIZE
);
1232 /* XBZRLE encoding (if there is no overflow) */
1233 encoded_len
= xbzrle_encode_buffer(prev_cached_page
, XBZRLE
.current_buf
,
1234 TARGET_PAGE_SIZE
, XBZRLE
.encoded_buf
,
1236 if (encoded_len
== 0) {
1237 trace_save_xbzrle_page_skipping();
1239 } else if (encoded_len
== -1) {
1240 trace_save_xbzrle_page_overflow();
1241 xbzrle_counters
.overflow
++;
1242 /* update data in the cache */
1244 memcpy(prev_cached_page
, *current_data
, TARGET_PAGE_SIZE
);
1245 *current_data
= prev_cached_page
;
1250 /* we need to update the data in the cache, in order to get the same data */
1252 memcpy(prev_cached_page
, XBZRLE
.current_buf
, TARGET_PAGE_SIZE
);
1255 /* Send XBZRLE based compressed page */
1256 bytes_xbzrle
= save_page_header(rs
, rs
->f
, block
,
1257 offset
| RAM_SAVE_FLAG_XBZRLE
);
1258 qemu_put_byte(rs
->f
, ENCODING_FLAG_XBZRLE
);
1259 qemu_put_be16(rs
->f
, encoded_len
);
1260 qemu_put_buffer(rs
->f
, XBZRLE
.encoded_buf
, encoded_len
);
1261 bytes_xbzrle
+= encoded_len
+ 1 + 2;
1262 xbzrle_counters
.pages
++;
1263 xbzrle_counters
.bytes
+= bytes_xbzrle
;
1264 ram_counters
.transferred
+= bytes_xbzrle
;
1270 * migration_bitmap_find_dirty: find the next dirty page from start
1272 * Called with rcu_read_lock() to protect migration_bitmap
1274 * Returns the byte offset within memory region of the start of a dirty page
1276 * @rs: current RAM state
1277 * @rb: RAMBlock where to search for dirty pages
1278 * @start: page where we start the search
1281 unsigned long migration_bitmap_find_dirty(RAMState
*rs
, RAMBlock
*rb
,
1282 unsigned long start
)
1284 unsigned long size
= rb
->used_length
>> TARGET_PAGE_BITS
;
1285 unsigned long *bitmap
= rb
->bmap
;
1288 if (!qemu_ram_is_migratable(rb
)) {
1292 if (rs
->ram_bulk_stage
&& start
> 0) {
1295 next
= find_next_bit(bitmap
, size
, start
);
1301 static inline bool migration_bitmap_clear_dirty(RAMState
*rs
,
1307 ret
= test_and_clear_bit(page
, rb
->bmap
);
1310 rs
->migration_dirty_pages
--;
1315 static void migration_bitmap_sync_range(RAMState
*rs
, RAMBlock
*rb
,
1316 ram_addr_t start
, ram_addr_t length
)
1318 rs
->migration_dirty_pages
+=
1319 cpu_physical_memory_sync_dirty_bitmap(rb
, start
, length
,
1320 &rs
->num_dirty_pages_period
);
1324 * ram_pagesize_summary: calculate all the pagesizes of a VM
1326 * Returns a summary bitmap of the page sizes of all RAMBlocks
1328 * For VMs with just normal pages this is equivalent to the host page
1329 * size. If it's got some huge pages then it's the OR of all the
1330 * different page sizes.
1332 uint64_t ram_pagesize_summary(void)
1335 uint64_t summary
= 0;
1337 RAMBLOCK_FOREACH_MIGRATABLE(block
) {
1338 summary
|= block
->page_size
;
1344 static void migration_update_rates(RAMState
*rs
, int64_t end_time
)
1346 uint64_t iter_count
= rs
->iterations
- rs
->iterations_prev
;
1348 /* calculate period counters */
1349 ram_counters
.dirty_pages_rate
= rs
->num_dirty_pages_period
* 1000
1350 / (end_time
- rs
->time_last_bitmap_sync
);
1356 if (migrate_use_xbzrle()) {
1357 xbzrle_counters
.cache_miss_rate
= (double)(xbzrle_counters
.cache_miss
-
1358 rs
->xbzrle_cache_miss_prev
) / iter_count
;
1359 rs
->xbzrle_cache_miss_prev
= xbzrle_counters
.cache_miss
;
1363 static void migration_bitmap_sync(RAMState
*rs
)
1367 uint64_t bytes_xfer_now
;
1369 ram_counters
.dirty_sync_count
++;
1371 if (!rs
->time_last_bitmap_sync
) {
1372 rs
->time_last_bitmap_sync
= qemu_clock_get_ms(QEMU_CLOCK_REALTIME
);
1375 trace_migration_bitmap_sync_start();
1376 memory_global_dirty_log_sync();
1378 qemu_mutex_lock(&rs
->bitmap_mutex
);
1380 RAMBLOCK_FOREACH_MIGRATABLE(block
) {
1381 migration_bitmap_sync_range(rs
, block
, 0, block
->used_length
);
1383 ram_counters
.remaining
= ram_bytes_remaining();
1385 qemu_mutex_unlock(&rs
->bitmap_mutex
);
1387 trace_migration_bitmap_sync_end(rs
->num_dirty_pages_period
);
1389 end_time
= qemu_clock_get_ms(QEMU_CLOCK_REALTIME
);
1391 /* more than 1 second = 1000 millisecons */
1392 if (end_time
> rs
->time_last_bitmap_sync
+ 1000) {
1393 bytes_xfer_now
= ram_counters
.transferred
;
1395 /* During block migration the auto-converge logic incorrectly detects
1396 * that ram migration makes no progress. Avoid this by disabling the
1397 * throttling logic during the bulk phase of block migration. */
1398 if (migrate_auto_converge() && !blk_mig_bulk_active()) {
1399 /* The following detection logic can be refined later. For now:
1400 Check to see if the dirtied bytes is 50% more than the approx.
1401 amount of bytes that just got transferred since the last time we
1402 were in this routine. If that happens twice, start or increase
1405 if ((rs
->num_dirty_pages_period
* TARGET_PAGE_SIZE
>
1406 (bytes_xfer_now
- rs
->bytes_xfer_prev
) / 2) &&
1407 (++rs
->dirty_rate_high_cnt
>= 2)) {
1408 trace_migration_throttle();
1409 rs
->dirty_rate_high_cnt
= 0;
1410 mig_throttle_guest_down();
1414 migration_update_rates(rs
, end_time
);
1416 rs
->iterations_prev
= rs
->iterations
;
1418 /* reset period counters */
1419 rs
->time_last_bitmap_sync
= end_time
;
1420 rs
->num_dirty_pages_period
= 0;
1421 rs
->bytes_xfer_prev
= bytes_xfer_now
;
1423 if (migrate_use_events()) {
1424 qapi_event_send_migration_pass(ram_counters
.dirty_sync_count
, NULL
);
1429 * save_zero_page: send the zero page to the stream
1431 * Returns the number of pages written.
1433 * @rs: current RAM state
1434 * @block: block that contains the page we want to send
1435 * @offset: offset inside the block for the page
1437 static int save_zero_page(RAMState
*rs
, RAMBlock
*block
, ram_addr_t offset
)
1439 uint8_t *p
= block
->host
+ offset
;
1442 if (is_zero_range(p
, TARGET_PAGE_SIZE
)) {
1443 ram_counters
.duplicate
++;
1444 ram_counters
.transferred
+=
1445 save_page_header(rs
, rs
->f
, block
, offset
| RAM_SAVE_FLAG_ZERO
);
1446 qemu_put_byte(rs
->f
, 0);
1447 ram_counters
.transferred
+= 1;
1454 static void ram_release_pages(const char *rbname
, uint64_t offset
, int pages
)
1456 if (!migrate_release_ram() || !migration_in_postcopy()) {
1460 ram_discard_range(rbname
, offset
, pages
<< TARGET_PAGE_BITS
);
1464 * @pages: the number of pages written by the control path,
1466 * > 0 - number of pages written
1468 * Return true if the pages has been saved, otherwise false is returned.
1470 static bool control_save_page(RAMState
*rs
, RAMBlock
*block
, ram_addr_t offset
,
1473 uint64_t bytes_xmit
= 0;
1477 ret
= ram_control_save_page(rs
->f
, block
->offset
, offset
, TARGET_PAGE_SIZE
,
1479 if (ret
== RAM_SAVE_CONTROL_NOT_SUPP
) {
1484 ram_counters
.transferred
+= bytes_xmit
;
1488 if (ret
== RAM_SAVE_CONTROL_DELAYED
) {
1492 if (bytes_xmit
> 0) {
1493 ram_counters
.normal
++;
1494 } else if (bytes_xmit
== 0) {
1495 ram_counters
.duplicate
++;
1502 * directly send the page to the stream
1504 * Returns the number of pages written.
1506 * @rs: current RAM state
1507 * @block: block that contains the page we want to send
1508 * @offset: offset inside the block for the page
1509 * @buf: the page to be sent
1510 * @async: send to page asyncly
1512 static int save_normal_page(RAMState
*rs
, RAMBlock
*block
, ram_addr_t offset
,
1513 uint8_t *buf
, bool async
)
1515 ram_counters
.transferred
+= save_page_header(rs
, rs
->f
, block
,
1516 offset
| RAM_SAVE_FLAG_PAGE
);
1518 qemu_put_buffer_async(rs
->f
, buf
, TARGET_PAGE_SIZE
,
1519 migrate_release_ram() &
1520 migration_in_postcopy());
1522 qemu_put_buffer(rs
->f
, buf
, TARGET_PAGE_SIZE
);
1524 ram_counters
.transferred
+= TARGET_PAGE_SIZE
;
1525 ram_counters
.normal
++;
1530 * ram_save_page: send the given page to the stream
1532 * Returns the number of pages written.
1534 * >=0 - Number of pages written - this might legally be 0
1535 * if xbzrle noticed the page was the same.
1537 * @rs: current RAM state
1538 * @block: block that contains the page we want to send
1539 * @offset: offset inside the block for the page
1540 * @last_stage: if we are at the completion stage
1542 static int ram_save_page(RAMState
*rs
, PageSearchStatus
*pss
, bool last_stage
)
1546 bool send_async
= true;
1547 RAMBlock
*block
= pss
->block
;
1548 ram_addr_t offset
= pss
->page
<< TARGET_PAGE_BITS
;
1549 ram_addr_t current_addr
= block
->offset
+ offset
;
1551 p
= block
->host
+ offset
;
1552 trace_ram_save_page(block
->idstr
, (uint64_t)offset
, p
);
1554 XBZRLE_cache_lock();
1555 if (!rs
->ram_bulk_stage
&& !migration_in_postcopy() &&
1556 migrate_use_xbzrle()) {
1557 pages
= save_xbzrle_page(rs
, &p
, current_addr
, block
,
1558 offset
, last_stage
);
1560 /* Can't send this cached data async, since the cache page
1561 * might get updated before it gets to the wire
1567 /* XBZRLE overflow or normal page */
1569 pages
= save_normal_page(rs
, block
, offset
, p
, send_async
);
1572 XBZRLE_cache_unlock();
1577 static int do_compress_ram_page(QEMUFile
*f
, z_stream
*stream
, RAMBlock
*block
,
1578 ram_addr_t offset
, uint8_t *source_buf
)
1580 RAMState
*rs
= ram_state
;
1581 int bytes_sent
, blen
;
1582 uint8_t *p
= block
->host
+ (offset
& TARGET_PAGE_MASK
);
1584 bytes_sent
= save_page_header(rs
, f
, block
, offset
|
1585 RAM_SAVE_FLAG_COMPRESS_PAGE
);
1588 * copy it to a internal buffer to avoid it being modified by VM
1589 * so that we can catch up the error during compression and
1592 memcpy(source_buf
, p
, TARGET_PAGE_SIZE
);
1593 blen
= qemu_put_compression_data(f
, stream
, source_buf
, TARGET_PAGE_SIZE
);
1596 qemu_file_set_error(migrate_get_current()->to_dst_file
, blen
);
1597 error_report("compressed data failed!");
1600 ram_release_pages(block
->idstr
, offset
& TARGET_PAGE_MASK
, 1);
1606 static void flush_compressed_data(RAMState
*rs
)
1608 int idx
, len
, thread_count
;
1610 if (!migrate_use_compression()) {
1613 thread_count
= migrate_compress_threads();
1615 qemu_mutex_lock(&comp_done_lock
);
1616 for (idx
= 0; idx
< thread_count
; idx
++) {
1617 while (!comp_param
[idx
].done
) {
1618 qemu_cond_wait(&comp_done_cond
, &comp_done_lock
);
1621 qemu_mutex_unlock(&comp_done_lock
);
1623 for (idx
= 0; idx
< thread_count
; idx
++) {
1624 qemu_mutex_lock(&comp_param
[idx
].mutex
);
1625 if (!comp_param
[idx
].quit
) {
1626 len
= qemu_put_qemu_file(rs
->f
, comp_param
[idx
].file
);
1627 ram_counters
.transferred
+= len
;
1629 qemu_mutex_unlock(&comp_param
[idx
].mutex
);
1633 static inline void set_compress_params(CompressParam
*param
, RAMBlock
*block
,
1636 param
->block
= block
;
1637 param
->offset
= offset
;
1640 static int compress_page_with_multi_thread(RAMState
*rs
, RAMBlock
*block
,
1643 int idx
, thread_count
, bytes_xmit
= -1, pages
= -1;
1645 thread_count
= migrate_compress_threads();
1646 qemu_mutex_lock(&comp_done_lock
);
1648 for (idx
= 0; idx
< thread_count
; idx
++) {
1649 if (comp_param
[idx
].done
) {
1650 comp_param
[idx
].done
= false;
1651 bytes_xmit
= qemu_put_qemu_file(rs
->f
, comp_param
[idx
].file
);
1652 qemu_mutex_lock(&comp_param
[idx
].mutex
);
1653 set_compress_params(&comp_param
[idx
], block
, offset
);
1654 qemu_cond_signal(&comp_param
[idx
].cond
);
1655 qemu_mutex_unlock(&comp_param
[idx
].mutex
);
1657 ram_counters
.normal
++;
1658 ram_counters
.transferred
+= bytes_xmit
;
1665 qemu_cond_wait(&comp_done_cond
, &comp_done_lock
);
1668 qemu_mutex_unlock(&comp_done_lock
);
1674 * find_dirty_block: find the next dirty page and update any state
1675 * associated with the search process.
1677 * Returns if a page is found
1679 * @rs: current RAM state
1680 * @pss: data about the state of the current dirty page scan
1681 * @again: set to false if the search has scanned the whole of RAM
1683 static bool find_dirty_block(RAMState
*rs
, PageSearchStatus
*pss
, bool *again
)
1685 pss
->page
= migration_bitmap_find_dirty(rs
, pss
->block
, pss
->page
);
1686 if (pss
->complete_round
&& pss
->block
== rs
->last_seen_block
&&
1687 pss
->page
>= rs
->last_page
) {
1689 * We've been once around the RAM and haven't found anything.
1695 if ((pss
->page
<< TARGET_PAGE_BITS
) >= pss
->block
->used_length
) {
1696 /* Didn't find anything in this RAM Block */
1698 pss
->block
= QLIST_NEXT_RCU(pss
->block
, next
);
1700 /* Hit the end of the list */
1701 pss
->block
= QLIST_FIRST_RCU(&ram_list
.blocks
);
1702 /* Flag that we've looped */
1703 pss
->complete_round
= true;
1704 rs
->ram_bulk_stage
= false;
1705 if (migrate_use_xbzrle()) {
1706 /* If xbzrle is on, stop using the data compression at this
1707 * point. In theory, xbzrle can do better than compression.
1709 flush_compressed_data(rs
);
1712 /* Didn't find anything this time, but try again on the new block */
1716 /* Can go around again, but... */
1718 /* We've found something so probably don't need to */
1724 * unqueue_page: gets a page of the queue
1726 * Helper for 'get_queued_page' - gets a page off the queue
1728 * Returns the block of the page (or NULL if none available)
1730 * @rs: current RAM state
1731 * @offset: used to return the offset within the RAMBlock
1733 static RAMBlock
*unqueue_page(RAMState
*rs
, ram_addr_t
*offset
)
1735 RAMBlock
*block
= NULL
;
1737 qemu_mutex_lock(&rs
->src_page_req_mutex
);
1738 if (!QSIMPLEQ_EMPTY(&rs
->src_page_requests
)) {
1739 struct RAMSrcPageRequest
*entry
=
1740 QSIMPLEQ_FIRST(&rs
->src_page_requests
);
1742 *offset
= entry
->offset
;
1744 if (entry
->len
> TARGET_PAGE_SIZE
) {
1745 entry
->len
-= TARGET_PAGE_SIZE
;
1746 entry
->offset
+= TARGET_PAGE_SIZE
;
1748 memory_region_unref(block
->mr
);
1749 QSIMPLEQ_REMOVE_HEAD(&rs
->src_page_requests
, next_req
);
1751 migration_consume_urgent_request();
1754 qemu_mutex_unlock(&rs
->src_page_req_mutex
);
1760 * get_queued_page: unqueue a page from the postocpy requests
1762 * Skips pages that are already sent (!dirty)
1764 * Returns if a queued page is found
1766 * @rs: current RAM state
1767 * @pss: data about the state of the current dirty page scan
1769 static bool get_queued_page(RAMState
*rs
, PageSearchStatus
*pss
)
1776 block
= unqueue_page(rs
, &offset
);
1778 * We're sending this page, and since it's postcopy nothing else
1779 * will dirty it, and we must make sure it doesn't get sent again
1780 * even if this queue request was received after the background
1781 * search already sent it.
1786 page
= offset
>> TARGET_PAGE_BITS
;
1787 dirty
= test_bit(page
, block
->bmap
);
1789 trace_get_queued_page_not_dirty(block
->idstr
, (uint64_t)offset
,
1790 page
, test_bit(page
, block
->unsentmap
));
1792 trace_get_queued_page(block
->idstr
, (uint64_t)offset
, page
);
1796 } while (block
&& !dirty
);
1800 * As soon as we start servicing pages out of order, then we have
1801 * to kill the bulk stage, since the bulk stage assumes
1802 * in (migration_bitmap_find_and_reset_dirty) that every page is
1803 * dirty, that's no longer true.
1805 rs
->ram_bulk_stage
= false;
1808 * We want the background search to continue from the queued page
1809 * since the guest is likely to want other pages near to the page
1810 * it just requested.
1813 pss
->page
= offset
>> TARGET_PAGE_BITS
;
1820 * migration_page_queue_free: drop any remaining pages in the ram
1823 * It should be empty at the end anyway, but in error cases there may
1824 * be some left. in case that there is any page left, we drop it.
1827 static void migration_page_queue_free(RAMState
*rs
)
1829 struct RAMSrcPageRequest
*mspr
, *next_mspr
;
1830 /* This queue generally should be empty - but in the case of a failed
1831 * migration might have some droppings in.
1834 QSIMPLEQ_FOREACH_SAFE(mspr
, &rs
->src_page_requests
, next_req
, next_mspr
) {
1835 memory_region_unref(mspr
->rb
->mr
);
1836 QSIMPLEQ_REMOVE_HEAD(&rs
->src_page_requests
, next_req
);
1843 * ram_save_queue_pages: queue the page for transmission
1845 * A request from postcopy destination for example.
1847 * Returns zero on success or negative on error
1849 * @rbname: Name of the RAMBLock of the request. NULL means the
1850 * same that last one.
1851 * @start: starting address from the start of the RAMBlock
1852 * @len: length (in bytes) to send
1854 int ram_save_queue_pages(const char *rbname
, ram_addr_t start
, ram_addr_t len
)
1857 RAMState
*rs
= ram_state
;
1859 ram_counters
.postcopy_requests
++;
1862 /* Reuse last RAMBlock */
1863 ramblock
= rs
->last_req_rb
;
1867 * Shouldn't happen, we can't reuse the last RAMBlock if
1868 * it's the 1st request.
1870 error_report("ram_save_queue_pages no previous block");
1874 ramblock
= qemu_ram_block_by_name(rbname
);
1877 /* We shouldn't be asked for a non-existent RAMBlock */
1878 error_report("ram_save_queue_pages no block '%s'", rbname
);
1881 rs
->last_req_rb
= ramblock
;
1883 trace_ram_save_queue_pages(ramblock
->idstr
, start
, len
);
1884 if (start
+len
> ramblock
->used_length
) {
1885 error_report("%s request overrun start=" RAM_ADDR_FMT
" len="
1886 RAM_ADDR_FMT
" blocklen=" RAM_ADDR_FMT
,
1887 __func__
, start
, len
, ramblock
->used_length
);
1891 struct RAMSrcPageRequest
*new_entry
=
1892 g_malloc0(sizeof(struct RAMSrcPageRequest
));
1893 new_entry
->rb
= ramblock
;
1894 new_entry
->offset
= start
;
1895 new_entry
->len
= len
;
1897 memory_region_ref(ramblock
->mr
);
1898 qemu_mutex_lock(&rs
->src_page_req_mutex
);
1899 QSIMPLEQ_INSERT_TAIL(&rs
->src_page_requests
, new_entry
, next_req
);
1900 migration_make_urgent_request();
1901 qemu_mutex_unlock(&rs
->src_page_req_mutex
);
1911 static bool save_page_use_compression(RAMState
*rs
)
1913 if (!migrate_use_compression()) {
1918 * If xbzrle is on, stop using the data compression after first
1919 * round of migration even if compression is enabled. In theory,
1920 * xbzrle can do better than compression.
1922 if (rs
->ram_bulk_stage
|| !migrate_use_xbzrle()) {
1930 * ram_save_target_page: save one target page
1932 * Returns the number of pages written
1934 * @rs: current RAM state
1935 * @pss: data about the page we want to send
1936 * @last_stage: if we are at the completion stage
1938 static int ram_save_target_page(RAMState
*rs
, PageSearchStatus
*pss
,
1941 RAMBlock
*block
= pss
->block
;
1942 ram_addr_t offset
= pss
->page
<< TARGET_PAGE_BITS
;
1945 if (control_save_page(rs
, block
, offset
, &res
)) {
1950 * When starting the process of a new block, the first page of
1951 * the block should be sent out before other pages in the same
1952 * block, and all the pages in last block should have been sent
1953 * out, keeping this order is important, because the 'cont' flag
1954 * is used to avoid resending the block name.
1956 if (block
!= rs
->last_sent_block
&& save_page_use_compression(rs
)) {
1957 flush_compressed_data(rs
);
1960 res
= save_zero_page(rs
, block
, offset
);
1962 /* Must let xbzrle know, otherwise a previous (now 0'd) cached
1963 * page would be stale
1965 if (!save_page_use_compression(rs
)) {
1966 XBZRLE_cache_lock();
1967 xbzrle_cache_zero_page(rs
, block
->offset
+ offset
);
1968 XBZRLE_cache_unlock();
1970 ram_release_pages(block
->idstr
, offset
, res
);
1975 * Make sure the first page is sent out before other pages.
1977 * we post it as normal page as compression will take much
1980 if (block
== rs
->last_sent_block
&& save_page_use_compression(rs
)) {
1981 return compress_page_with_multi_thread(rs
, block
, offset
);
1984 return ram_save_page(rs
, pss
, last_stage
);
1988 * ram_save_host_page: save a whole host page
1990 * Starting at *offset send pages up to the end of the current host
1991 * page. It's valid for the initial offset to point into the middle of
1992 * a host page in which case the remainder of the hostpage is sent.
1993 * Only dirty target pages are sent. Note that the host page size may
1994 * be a huge page for this block.
1995 * The saving stops at the boundary of the used_length of the block
1996 * if the RAMBlock isn't a multiple of the host page size.
1998 * Returns the number of pages written or negative on error
2000 * @rs: current RAM state
2001 * @ms: current migration state
2002 * @pss: data about the page we want to send
2003 * @last_stage: if we are at the completion stage
2005 static int ram_save_host_page(RAMState
*rs
, PageSearchStatus
*pss
,
2008 int tmppages
, pages
= 0;
2009 size_t pagesize_bits
=
2010 qemu_ram_pagesize(pss
->block
) >> TARGET_PAGE_BITS
;
2012 if (!qemu_ram_is_migratable(pss
->block
)) {
2013 error_report("block %s should not be migrated !", pss
->block
->idstr
);
2018 /* Check the pages is dirty and if it is send it */
2019 if (!migration_bitmap_clear_dirty(rs
, pss
->block
, pss
->page
)) {
2024 tmppages
= ram_save_target_page(rs
, pss
, last_stage
);
2030 if (pss
->block
->unsentmap
) {
2031 clear_bit(pss
->page
, pss
->block
->unsentmap
);
2035 } while ((pss
->page
& (pagesize_bits
- 1)) &&
2036 offset_in_ramblock(pss
->block
, pss
->page
<< TARGET_PAGE_BITS
));
2038 /* The offset we leave with is the last one we looked at */
2044 * ram_find_and_save_block: finds a dirty page and sends it to f
2046 * Called within an RCU critical section.
2048 * Returns the number of pages written where zero means no dirty pages
2050 * @rs: current RAM state
2051 * @last_stage: if we are at the completion stage
2053 * On systems where host-page-size > target-page-size it will send all the
2054 * pages in a host page that are dirty.
2057 static int ram_find_and_save_block(RAMState
*rs
, bool last_stage
)
2059 PageSearchStatus pss
;
2063 /* No dirty page as there is zero RAM */
2064 if (!ram_bytes_total()) {
2068 pss
.block
= rs
->last_seen_block
;
2069 pss
.page
= rs
->last_page
;
2070 pss
.complete_round
= false;
2073 pss
.block
= QLIST_FIRST_RCU(&ram_list
.blocks
);
2078 found
= get_queued_page(rs
, &pss
);
2081 /* priority queue empty, so just search for something dirty */
2082 found
= find_dirty_block(rs
, &pss
, &again
);
2086 pages
= ram_save_host_page(rs
, &pss
, last_stage
);
2088 } while (!pages
&& again
);
2090 rs
->last_seen_block
= pss
.block
;
2091 rs
->last_page
= pss
.page
;
2096 void acct_update_position(QEMUFile
*f
, size_t size
, bool zero
)
2098 uint64_t pages
= size
/ TARGET_PAGE_SIZE
;
2101 ram_counters
.duplicate
+= pages
;
2103 ram_counters
.normal
+= pages
;
2104 ram_counters
.transferred
+= size
;
2105 qemu_update_position(f
, size
);
2109 uint64_t ram_bytes_total(void)
2115 RAMBLOCK_FOREACH_MIGRATABLE(block
) {
2116 total
+= block
->used_length
;
2122 static void xbzrle_load_setup(void)
2124 XBZRLE
.decoded_buf
= g_malloc(TARGET_PAGE_SIZE
);
2127 static void xbzrle_load_cleanup(void)
2129 g_free(XBZRLE
.decoded_buf
);
2130 XBZRLE
.decoded_buf
= NULL
;
2133 static void ram_state_cleanup(RAMState
**rsp
)
2136 migration_page_queue_free(*rsp
);
2137 qemu_mutex_destroy(&(*rsp
)->bitmap_mutex
);
2138 qemu_mutex_destroy(&(*rsp
)->src_page_req_mutex
);
2144 static void xbzrle_cleanup(void)
2146 XBZRLE_cache_lock();
2148 cache_fini(XBZRLE
.cache
);
2149 g_free(XBZRLE
.encoded_buf
);
2150 g_free(XBZRLE
.current_buf
);
2151 g_free(XBZRLE
.zero_target_page
);
2152 XBZRLE
.cache
= NULL
;
2153 XBZRLE
.encoded_buf
= NULL
;
2154 XBZRLE
.current_buf
= NULL
;
2155 XBZRLE
.zero_target_page
= NULL
;
2157 XBZRLE_cache_unlock();
2160 static void ram_save_cleanup(void *opaque
)
2162 RAMState
**rsp
= opaque
;
2165 /* caller have hold iothread lock or is in a bh, so there is
2166 * no writing race against this migration_bitmap
2168 memory_global_dirty_log_stop();
2170 RAMBLOCK_FOREACH_MIGRATABLE(block
) {
2171 g_free(block
->bmap
);
2173 g_free(block
->unsentmap
);
2174 block
->unsentmap
= NULL
;
2178 compress_threads_save_cleanup();
2179 ram_state_cleanup(rsp
);
2182 static void ram_state_reset(RAMState
*rs
)
2184 rs
->last_seen_block
= NULL
;
2185 rs
->last_sent_block
= NULL
;
2187 rs
->last_version
= ram_list
.version
;
2188 rs
->ram_bulk_stage
= true;
2191 #define MAX_WAIT 50 /* ms, half buffered_file limit */
2194 * 'expected' is the value you expect the bitmap mostly to be full
2195 * of; it won't bother printing lines that are all this value.
2196 * If 'todump' is null the migration bitmap is dumped.
2198 void ram_debug_dump_bitmap(unsigned long *todump
, bool expected
,
2199 unsigned long pages
)
2202 int64_t linelen
= 128;
2205 for (cur
= 0; cur
< pages
; cur
+= linelen
) {
2209 * Last line; catch the case where the line length
2210 * is longer than remaining ram
2212 if (cur
+ linelen
> pages
) {
2213 linelen
= pages
- cur
;
2215 for (curb
= 0; curb
< linelen
; curb
++) {
2216 bool thisbit
= test_bit(cur
+ curb
, todump
);
2217 linebuf
[curb
] = thisbit
? '1' : '.';
2218 found
= found
|| (thisbit
!= expected
);
2221 linebuf
[curb
] = '\0';
2222 fprintf(stderr
, "0x%08" PRIx64
" : %s\n", cur
, linebuf
);
2227 /* **** functions for postcopy ***** */
2229 void ram_postcopy_migrated_memory_release(MigrationState
*ms
)
2231 struct RAMBlock
*block
;
2233 RAMBLOCK_FOREACH_MIGRATABLE(block
) {
2234 unsigned long *bitmap
= block
->bmap
;
2235 unsigned long range
= block
->used_length
>> TARGET_PAGE_BITS
;
2236 unsigned long run_start
= find_next_zero_bit(bitmap
, range
, 0);
2238 while (run_start
< range
) {
2239 unsigned long run_end
= find_next_bit(bitmap
, range
, run_start
+ 1);
2240 ram_discard_range(block
->idstr
, run_start
<< TARGET_PAGE_BITS
,
2241 (run_end
- run_start
) << TARGET_PAGE_BITS
);
2242 run_start
= find_next_zero_bit(bitmap
, range
, run_end
+ 1);
2248 * postcopy_send_discard_bm_ram: discard a RAMBlock
2250 * Returns zero on success
2252 * Callback from postcopy_each_ram_send_discard for each RAMBlock
2253 * Note: At this point the 'unsentmap' is the processed bitmap combined
2254 * with the dirtymap; so a '1' means it's either dirty or unsent.
2256 * @ms: current migration state
2257 * @pds: state for postcopy
2258 * @start: RAMBlock starting page
2259 * @length: RAMBlock size
2261 static int postcopy_send_discard_bm_ram(MigrationState
*ms
,
2262 PostcopyDiscardState
*pds
,
2265 unsigned long end
= block
->used_length
>> TARGET_PAGE_BITS
;
2266 unsigned long current
;
2267 unsigned long *unsentmap
= block
->unsentmap
;
2269 for (current
= 0; current
< end
; ) {
2270 unsigned long one
= find_next_bit(unsentmap
, end
, current
);
2273 unsigned long zero
= find_next_zero_bit(unsentmap
, end
, one
+ 1);
2274 unsigned long discard_length
;
2277 discard_length
= end
- one
;
2279 discard_length
= zero
- one
;
2281 if (discard_length
) {
2282 postcopy_discard_send_range(ms
, pds
, one
, discard_length
);
2284 current
= one
+ discard_length
;
2294 * postcopy_each_ram_send_discard: discard all RAMBlocks
2296 * Returns 0 for success or negative for error
2298 * Utility for the outgoing postcopy code.
2299 * Calls postcopy_send_discard_bm_ram for each RAMBlock
2300 * passing it bitmap indexes and name.
2301 * (qemu_ram_foreach_block ends up passing unscaled lengths
2302 * which would mean postcopy code would have to deal with target page)
2304 * @ms: current migration state
2306 static int postcopy_each_ram_send_discard(MigrationState
*ms
)
2308 struct RAMBlock
*block
;
2311 RAMBLOCK_FOREACH_MIGRATABLE(block
) {
2312 PostcopyDiscardState
*pds
=
2313 postcopy_discard_send_init(ms
, block
->idstr
);
2316 * Postcopy sends chunks of bitmap over the wire, but it
2317 * just needs indexes at this point, avoids it having
2318 * target page specific code.
2320 ret
= postcopy_send_discard_bm_ram(ms
, pds
, block
);
2321 postcopy_discard_send_finish(ms
, pds
);
2331 * postcopy_chunk_hostpages_pass: canocalize bitmap in hostpages
2333 * Helper for postcopy_chunk_hostpages; it's called twice to
2334 * canonicalize the two bitmaps, that are similar, but one is
2337 * Postcopy requires that all target pages in a hostpage are dirty or
2338 * clean, not a mix. This function canonicalizes the bitmaps.
2340 * @ms: current migration state
2341 * @unsent_pass: if true we need to canonicalize partially unsent host pages
2342 * otherwise we need to canonicalize partially dirty host pages
2343 * @block: block that contains the page we want to canonicalize
2344 * @pds: state for postcopy
2346 static void postcopy_chunk_hostpages_pass(MigrationState
*ms
, bool unsent_pass
,
2348 PostcopyDiscardState
*pds
)
2350 RAMState
*rs
= ram_state
;
2351 unsigned long *bitmap
= block
->bmap
;
2352 unsigned long *unsentmap
= block
->unsentmap
;
2353 unsigned int host_ratio
= block
->page_size
/ TARGET_PAGE_SIZE
;
2354 unsigned long pages
= block
->used_length
>> TARGET_PAGE_BITS
;
2355 unsigned long run_start
;
2357 if (block
->page_size
== TARGET_PAGE_SIZE
) {
2358 /* Easy case - TPS==HPS for a non-huge page RAMBlock */
2363 /* Find a sent page */
2364 run_start
= find_next_zero_bit(unsentmap
, pages
, 0);
2366 /* Find a dirty page */
2367 run_start
= find_next_bit(bitmap
, pages
, 0);
2370 while (run_start
< pages
) {
2371 bool do_fixup
= false;
2372 unsigned long fixup_start_addr
;
2373 unsigned long host_offset
;
2376 * If the start of this run of pages is in the middle of a host
2377 * page, then we need to fixup this host page.
2379 host_offset
= run_start
% host_ratio
;
2382 run_start
-= host_offset
;
2383 fixup_start_addr
= run_start
;
2384 /* For the next pass */
2385 run_start
= run_start
+ host_ratio
;
2387 /* Find the end of this run */
2388 unsigned long run_end
;
2390 run_end
= find_next_bit(unsentmap
, pages
, run_start
+ 1);
2392 run_end
= find_next_zero_bit(bitmap
, pages
, run_start
+ 1);
2395 * If the end isn't at the start of a host page, then the
2396 * run doesn't finish at the end of a host page
2397 * and we need to discard.
2399 host_offset
= run_end
% host_ratio
;
2402 fixup_start_addr
= run_end
- host_offset
;
2404 * This host page has gone, the next loop iteration starts
2405 * from after the fixup
2407 run_start
= fixup_start_addr
+ host_ratio
;
2410 * No discards on this iteration, next loop starts from
2411 * next sent/dirty page
2413 run_start
= run_end
+ 1;
2420 /* Tell the destination to discard this page */
2421 if (unsent_pass
|| !test_bit(fixup_start_addr
, unsentmap
)) {
2422 /* For the unsent_pass we:
2423 * discard partially sent pages
2424 * For the !unsent_pass (dirty) we:
2425 * discard partially dirty pages that were sent
2426 * (any partially sent pages were already discarded
2427 * by the previous unsent_pass)
2429 postcopy_discard_send_range(ms
, pds
, fixup_start_addr
,
2433 /* Clean up the bitmap */
2434 for (page
= fixup_start_addr
;
2435 page
< fixup_start_addr
+ host_ratio
; page
++) {
2436 /* All pages in this host page are now not sent */
2437 set_bit(page
, unsentmap
);
2440 * Remark them as dirty, updating the count for any pages
2441 * that weren't previously dirty.
2443 rs
->migration_dirty_pages
+= !test_and_set_bit(page
, bitmap
);
2448 /* Find the next sent page for the next iteration */
2449 run_start
= find_next_zero_bit(unsentmap
, pages
, run_start
);
2451 /* Find the next dirty page for the next iteration */
2452 run_start
= find_next_bit(bitmap
, pages
, run_start
);
2458 * postcopy_chuck_hostpages: discrad any partially sent host page
2460 * Utility for the outgoing postcopy code.
2462 * Discard any partially sent host-page size chunks, mark any partially
2463 * dirty host-page size chunks as all dirty. In this case the host-page
2464 * is the host-page for the particular RAMBlock, i.e. it might be a huge page
2466 * Returns zero on success
2468 * @ms: current migration state
2469 * @block: block we want to work with
2471 static int postcopy_chunk_hostpages(MigrationState
*ms
, RAMBlock
*block
)
2473 PostcopyDiscardState
*pds
=
2474 postcopy_discard_send_init(ms
, block
->idstr
);
2476 /* First pass: Discard all partially sent host pages */
2477 postcopy_chunk_hostpages_pass(ms
, true, block
, pds
);
2479 * Second pass: Ensure that all partially dirty host pages are made
2482 postcopy_chunk_hostpages_pass(ms
, false, block
, pds
);
2484 postcopy_discard_send_finish(ms
, pds
);
2489 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
2491 * Returns zero on success
2493 * Transmit the set of pages to be discarded after precopy to the target
2494 * these are pages that:
2495 * a) Have been previously transmitted but are now dirty again
2496 * b) Pages that have never been transmitted, this ensures that
2497 * any pages on the destination that have been mapped by background
2498 * tasks get discarded (transparent huge pages is the specific concern)
2499 * Hopefully this is pretty sparse
2501 * @ms: current migration state
2503 int ram_postcopy_send_discard_bitmap(MigrationState
*ms
)
2505 RAMState
*rs
= ram_state
;
2511 /* This should be our last sync, the src is now paused */
2512 migration_bitmap_sync(rs
);
2514 /* Easiest way to make sure we don't resume in the middle of a host-page */
2515 rs
->last_seen_block
= NULL
;
2516 rs
->last_sent_block
= NULL
;
2519 RAMBLOCK_FOREACH_MIGRATABLE(block
) {
2520 unsigned long pages
= block
->used_length
>> TARGET_PAGE_BITS
;
2521 unsigned long *bitmap
= block
->bmap
;
2522 unsigned long *unsentmap
= block
->unsentmap
;
2525 /* We don't have a safe way to resize the sentmap, so
2526 * if the bitmap was resized it will be NULL at this
2529 error_report("migration ram resized during precopy phase");
2533 /* Deal with TPS != HPS and huge pages */
2534 ret
= postcopy_chunk_hostpages(ms
, block
);
2541 * Update the unsentmap to be unsentmap = unsentmap | dirty
2543 bitmap_or(unsentmap
, unsentmap
, bitmap
, pages
);
2544 #ifdef DEBUG_POSTCOPY
2545 ram_debug_dump_bitmap(unsentmap
, true, pages
);
2548 trace_ram_postcopy_send_discard_bitmap();
2550 ret
= postcopy_each_ram_send_discard(ms
);
2557 * ram_discard_range: discard dirtied pages at the beginning of postcopy
2559 * Returns zero on success
2561 * @rbname: name of the RAMBlock of the request. NULL means the
2562 * same that last one.
2563 * @start: RAMBlock starting page
2564 * @length: RAMBlock size
2566 int ram_discard_range(const char *rbname
, uint64_t start
, size_t length
)
2570 trace_ram_discard_range(rbname
, start
, length
);
2573 RAMBlock
*rb
= qemu_ram_block_by_name(rbname
);
2576 error_report("ram_discard_range: Failed to find block '%s'", rbname
);
2580 bitmap_clear(rb
->receivedmap
, start
>> qemu_target_page_bits(),
2581 length
>> qemu_target_page_bits());
2582 ret
= ram_block_discard_range(rb
, start
, length
);
2591 * For every allocation, we will try not to crash the VM if the
2592 * allocation failed.
2594 static int xbzrle_init(void)
2596 Error
*local_err
= NULL
;
2598 if (!migrate_use_xbzrle()) {
2602 XBZRLE_cache_lock();
2604 XBZRLE
.zero_target_page
= g_try_malloc0(TARGET_PAGE_SIZE
);
2605 if (!XBZRLE
.zero_target_page
) {
2606 error_report("%s: Error allocating zero page", __func__
);
2610 XBZRLE
.cache
= cache_init(migrate_xbzrle_cache_size(),
2611 TARGET_PAGE_SIZE
, &local_err
);
2612 if (!XBZRLE
.cache
) {
2613 error_report_err(local_err
);
2614 goto free_zero_page
;
2617 XBZRLE
.encoded_buf
= g_try_malloc0(TARGET_PAGE_SIZE
);
2618 if (!XBZRLE
.encoded_buf
) {
2619 error_report("%s: Error allocating encoded_buf", __func__
);
2623 XBZRLE
.current_buf
= g_try_malloc(TARGET_PAGE_SIZE
);
2624 if (!XBZRLE
.current_buf
) {
2625 error_report("%s: Error allocating current_buf", __func__
);
2626 goto free_encoded_buf
;
2629 /* We are all good */
2630 XBZRLE_cache_unlock();
2634 g_free(XBZRLE
.encoded_buf
);
2635 XBZRLE
.encoded_buf
= NULL
;
2637 cache_fini(XBZRLE
.cache
);
2638 XBZRLE
.cache
= NULL
;
2640 g_free(XBZRLE
.zero_target_page
);
2641 XBZRLE
.zero_target_page
= NULL
;
2643 XBZRLE_cache_unlock();
2647 static int ram_state_init(RAMState
**rsp
)
2649 *rsp
= g_try_new0(RAMState
, 1);
2652 error_report("%s: Init ramstate fail", __func__
);
2656 qemu_mutex_init(&(*rsp
)->bitmap_mutex
);
2657 qemu_mutex_init(&(*rsp
)->src_page_req_mutex
);
2658 QSIMPLEQ_INIT(&(*rsp
)->src_page_requests
);
2661 * Count the total number of pages used by ram blocks not including any
2662 * gaps due to alignment or unplugs.
2664 (*rsp
)->migration_dirty_pages
= ram_bytes_total() >> TARGET_PAGE_BITS
;
2666 ram_state_reset(*rsp
);
2671 static void ram_list_init_bitmaps(void)
2674 unsigned long pages
;
2676 /* Skip setting bitmap if there is no RAM */
2677 if (ram_bytes_total()) {
2678 RAMBLOCK_FOREACH_MIGRATABLE(block
) {
2679 pages
= block
->max_length
>> TARGET_PAGE_BITS
;
2680 block
->bmap
= bitmap_new(pages
);
2681 bitmap_set(block
->bmap
, 0, pages
);
2682 if (migrate_postcopy_ram()) {
2683 block
->unsentmap
= bitmap_new(pages
);
2684 bitmap_set(block
->unsentmap
, 0, pages
);
2690 static void ram_init_bitmaps(RAMState
*rs
)
2692 /* For memory_global_dirty_log_start below. */
2693 qemu_mutex_lock_iothread();
2694 qemu_mutex_lock_ramlist();
2697 ram_list_init_bitmaps();
2698 memory_global_dirty_log_start();
2699 migration_bitmap_sync(rs
);
2702 qemu_mutex_unlock_ramlist();
2703 qemu_mutex_unlock_iothread();
2706 static int ram_init_all(RAMState
**rsp
)
2708 if (ram_state_init(rsp
)) {
2712 if (xbzrle_init()) {
2713 ram_state_cleanup(rsp
);
2717 ram_init_bitmaps(*rsp
);
2722 static void ram_state_resume_prepare(RAMState
*rs
, QEMUFile
*out
)
2728 * Postcopy is not using xbzrle/compression, so no need for that.
2729 * Also, since source are already halted, we don't need to care
2730 * about dirty page logging as well.
2733 RAMBLOCK_FOREACH_MIGRATABLE(block
) {
2734 pages
+= bitmap_count_one(block
->bmap
,
2735 block
->used_length
>> TARGET_PAGE_BITS
);
2738 /* This may not be aligned with current bitmaps. Recalculate. */
2739 rs
->migration_dirty_pages
= pages
;
2741 rs
->last_seen_block
= NULL
;
2742 rs
->last_sent_block
= NULL
;
2744 rs
->last_version
= ram_list
.version
;
2746 * Disable the bulk stage, otherwise we'll resend the whole RAM no
2747 * matter what we have sent.
2749 rs
->ram_bulk_stage
= false;
2751 /* Update RAMState cache of output QEMUFile */
2754 trace_ram_state_resume_prepare(pages
);
2758 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
2759 * long-running RCU critical section. When rcu-reclaims in the code
2760 * start to become numerous it will be necessary to reduce the
2761 * granularity of these critical sections.
2765 * ram_save_setup: Setup RAM for migration
2767 * Returns zero to indicate success and negative for error
2769 * @f: QEMUFile where to send the data
2770 * @opaque: RAMState pointer
2772 static int ram_save_setup(QEMUFile
*f
, void *opaque
)
2774 RAMState
**rsp
= opaque
;
2777 if (compress_threads_save_setup()) {
2781 /* migration has already setup the bitmap, reuse it. */
2782 if (!migration_in_colo_state()) {
2783 if (ram_init_all(rsp
) != 0) {
2784 compress_threads_save_cleanup();
2792 qemu_put_be64(f
, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE
);
2794 RAMBLOCK_FOREACH_MIGRATABLE(block
) {
2795 qemu_put_byte(f
, strlen(block
->idstr
));
2796 qemu_put_buffer(f
, (uint8_t *)block
->idstr
, strlen(block
->idstr
));
2797 qemu_put_be64(f
, block
->used_length
);
2798 if (migrate_postcopy_ram() && block
->page_size
!= qemu_host_page_size
) {
2799 qemu_put_be64(f
, block
->page_size
);
2805 ram_control_before_iterate(f
, RAM_CONTROL_SETUP
);
2806 ram_control_after_iterate(f
, RAM_CONTROL_SETUP
);
2808 qemu_put_be64(f
, RAM_SAVE_FLAG_EOS
);
2814 * ram_save_iterate: iterative stage for migration
2816 * Returns zero to indicate success and negative for error
2818 * @f: QEMUFile where to send the data
2819 * @opaque: RAMState pointer
2821 static int ram_save_iterate(QEMUFile
*f
, void *opaque
)
2823 RAMState
**temp
= opaque
;
2824 RAMState
*rs
= *temp
;
2830 if (blk_mig_bulk_active()) {
2831 /* Avoid transferring ram during bulk phase of block migration as
2832 * the bulk phase will usually take a long time and transferring
2833 * ram updates during that time is pointless. */
2838 if (ram_list
.version
!= rs
->last_version
) {
2839 ram_state_reset(rs
);
2842 /* Read version before ram_list.blocks */
2845 ram_control_before_iterate(f
, RAM_CONTROL_ROUND
);
2847 t0
= qemu_clock_get_ns(QEMU_CLOCK_REALTIME
);
2849 while ((ret
= qemu_file_rate_limit(f
)) == 0 ||
2850 !QSIMPLEQ_EMPTY(&rs
->src_page_requests
)) {
2853 if (qemu_file_get_error(f
)) {
2857 pages
= ram_find_and_save_block(rs
, false);
2858 /* no more pages to sent */
2865 /* we want to check in the 1st loop, just in case it was the 1st time
2866 and we had to sync the dirty bitmap.
2867 qemu_get_clock_ns() is a bit expensive, so we only check each some
2870 if ((i
& 63) == 0) {
2871 uint64_t t1
= (qemu_clock_get_ns(QEMU_CLOCK_REALTIME
) - t0
) / 1000000;
2872 if (t1
> MAX_WAIT
) {
2873 trace_ram_save_iterate_big_wait(t1
, i
);
2879 flush_compressed_data(rs
);
2883 * Must occur before EOS (or any QEMUFile operation)
2884 * because of RDMA protocol.
2886 ram_control_after_iterate(f
, RAM_CONTROL_ROUND
);
2889 qemu_put_be64(f
, RAM_SAVE_FLAG_EOS
);
2890 ram_counters
.transferred
+= 8;
2892 ret
= qemu_file_get_error(f
);
2901 * ram_save_complete: function called to send the remaining amount of ram
2903 * Returns zero to indicate success
2905 * Called with iothread lock
2907 * @f: QEMUFile where to send the data
2908 * @opaque: RAMState pointer
2910 static int ram_save_complete(QEMUFile
*f
, void *opaque
)
2912 RAMState
**temp
= opaque
;
2913 RAMState
*rs
= *temp
;
2917 if (!migration_in_postcopy()) {
2918 migration_bitmap_sync(rs
);
2921 ram_control_before_iterate(f
, RAM_CONTROL_FINISH
);
2923 /* try transferring iterative blocks of memory */
2925 /* flush all remaining blocks regardless of rate limiting */
2929 pages
= ram_find_and_save_block(rs
, !migration_in_colo_state());
2930 /* no more blocks to sent */
2936 flush_compressed_data(rs
);
2937 ram_control_after_iterate(f
, RAM_CONTROL_FINISH
);
2941 qemu_put_be64(f
, RAM_SAVE_FLAG_EOS
);
2946 static void ram_save_pending(QEMUFile
*f
, void *opaque
, uint64_t max_size
,
2947 uint64_t *res_precopy_only
,
2948 uint64_t *res_compatible
,
2949 uint64_t *res_postcopy_only
)
2951 RAMState
**temp
= opaque
;
2952 RAMState
*rs
= *temp
;
2953 uint64_t remaining_size
;
2955 remaining_size
= rs
->migration_dirty_pages
* TARGET_PAGE_SIZE
;
2957 if (!migration_in_postcopy() &&
2958 remaining_size
< max_size
) {
2959 qemu_mutex_lock_iothread();
2961 migration_bitmap_sync(rs
);
2963 qemu_mutex_unlock_iothread();
2964 remaining_size
= rs
->migration_dirty_pages
* TARGET_PAGE_SIZE
;
2967 if (migrate_postcopy_ram()) {
2968 /* We can do postcopy, and all the data is postcopiable */
2969 *res_compatible
+= remaining_size
;
2971 *res_precopy_only
+= remaining_size
;
2975 static int load_xbzrle(QEMUFile
*f
, ram_addr_t addr
, void *host
)
2977 unsigned int xh_len
;
2979 uint8_t *loaded_data
;
2981 /* extract RLE header */
2982 xh_flags
= qemu_get_byte(f
);
2983 xh_len
= qemu_get_be16(f
);
2985 if (xh_flags
!= ENCODING_FLAG_XBZRLE
) {
2986 error_report("Failed to load XBZRLE page - wrong compression!");
2990 if (xh_len
> TARGET_PAGE_SIZE
) {
2991 error_report("Failed to load XBZRLE page - len overflow!");
2994 loaded_data
= XBZRLE
.decoded_buf
;
2995 /* load data and decode */
2996 /* it can change loaded_data to point to an internal buffer */
2997 qemu_get_buffer_in_place(f
, &loaded_data
, xh_len
);
3000 if (xbzrle_decode_buffer(loaded_data
, xh_len
, host
,
3001 TARGET_PAGE_SIZE
) == -1) {
3002 error_report("Failed to load XBZRLE page - decode error!");
3010 * ram_block_from_stream: read a RAMBlock id from the migration stream
3012 * Must be called from within a rcu critical section.
3014 * Returns a pointer from within the RCU-protected ram_list.
3016 * @f: QEMUFile where to read the data from
3017 * @flags: Page flags (mostly to see if it's a continuation of previous block)
3019 static inline RAMBlock
*ram_block_from_stream(QEMUFile
*f
, int flags
)
3021 static RAMBlock
*block
= NULL
;
3025 if (flags
& RAM_SAVE_FLAG_CONTINUE
) {
3027 error_report("Ack, bad migration stream!");
3033 len
= qemu_get_byte(f
);
3034 qemu_get_buffer(f
, (uint8_t *)id
, len
);
3037 block
= qemu_ram_block_by_name(id
);
3039 error_report("Can't find block %s", id
);
3043 if (!qemu_ram_is_migratable(block
)) {
3044 error_report("block %s should not be migrated !", id
);
3051 static inline void *host_from_ram_block_offset(RAMBlock
*block
,
3054 if (!offset_in_ramblock(block
, offset
)) {
3058 return block
->host
+ offset
;
3062 * ram_handle_compressed: handle the zero page case
3064 * If a page (or a whole RDMA chunk) has been
3065 * determined to be zero, then zap it.
3067 * @host: host address for the zero page
3068 * @ch: what the page is filled from. We only support zero
3069 * @size: size of the zero page
3071 void ram_handle_compressed(void *host
, uint8_t ch
, uint64_t size
)
3073 if (ch
!= 0 || !is_zero_range(host
, size
)) {
3074 memset(host
, ch
, size
);
3078 /* return the size after decompression, or negative value on error */
3080 qemu_uncompress_data(z_stream
*stream
, uint8_t *dest
, size_t dest_len
,
3081 const uint8_t *source
, size_t source_len
)
3085 err
= inflateReset(stream
);
3090 stream
->avail_in
= source_len
;
3091 stream
->next_in
= (uint8_t *)source
;
3092 stream
->avail_out
= dest_len
;
3093 stream
->next_out
= dest
;
3095 err
= inflate(stream
, Z_NO_FLUSH
);
3096 if (err
!= Z_STREAM_END
) {
3100 return stream
->total_out
;
3103 static void *do_data_decompress(void *opaque
)
3105 DecompressParam
*param
= opaque
;
3106 unsigned long pagesize
;
3110 qemu_mutex_lock(¶m
->mutex
);
3111 while (!param
->quit
) {
3116 qemu_mutex_unlock(¶m
->mutex
);
3118 pagesize
= TARGET_PAGE_SIZE
;
3120 ret
= qemu_uncompress_data(¶m
->stream
, des
, pagesize
,
3121 param
->compbuf
, len
);
3122 if (ret
< 0 && migrate_get_current()->decompress_error_check
) {
3123 error_report("decompress data failed");
3124 qemu_file_set_error(decomp_file
, ret
);
3127 qemu_mutex_lock(&decomp_done_lock
);
3129 qemu_cond_signal(&decomp_done_cond
);
3130 qemu_mutex_unlock(&decomp_done_lock
);
3132 qemu_mutex_lock(¶m
->mutex
);
3134 qemu_cond_wait(¶m
->cond
, ¶m
->mutex
);
3137 qemu_mutex_unlock(¶m
->mutex
);
3142 static int wait_for_decompress_done(void)
3144 int idx
, thread_count
;
3146 if (!migrate_use_compression()) {
3150 thread_count
= migrate_decompress_threads();
3151 qemu_mutex_lock(&decomp_done_lock
);
3152 for (idx
= 0; idx
< thread_count
; idx
++) {
3153 while (!decomp_param
[idx
].done
) {
3154 qemu_cond_wait(&decomp_done_cond
, &decomp_done_lock
);
3157 qemu_mutex_unlock(&decomp_done_lock
);
3158 return qemu_file_get_error(decomp_file
);
3161 static void compress_threads_load_cleanup(void)
3163 int i
, thread_count
;
3165 if (!migrate_use_compression()) {
3168 thread_count
= migrate_decompress_threads();
3169 for (i
= 0; i
< thread_count
; i
++) {
3171 * we use it as a indicator which shows if the thread is
3172 * properly init'd or not
3174 if (!decomp_param
[i
].compbuf
) {
3178 qemu_mutex_lock(&decomp_param
[i
].mutex
);
3179 decomp_param
[i
].quit
= true;
3180 qemu_cond_signal(&decomp_param
[i
].cond
);
3181 qemu_mutex_unlock(&decomp_param
[i
].mutex
);
3183 for (i
= 0; i
< thread_count
; i
++) {
3184 if (!decomp_param
[i
].compbuf
) {
3188 qemu_thread_join(decompress_threads
+ i
);
3189 qemu_mutex_destroy(&decomp_param
[i
].mutex
);
3190 qemu_cond_destroy(&decomp_param
[i
].cond
);
3191 inflateEnd(&decomp_param
[i
].stream
);
3192 g_free(decomp_param
[i
].compbuf
);
3193 decomp_param
[i
].compbuf
= NULL
;
3195 g_free(decompress_threads
);
3196 g_free(decomp_param
);
3197 decompress_threads
= NULL
;
3198 decomp_param
= NULL
;
3202 static int compress_threads_load_setup(QEMUFile
*f
)
3204 int i
, thread_count
;
3206 if (!migrate_use_compression()) {
3210 thread_count
= migrate_decompress_threads();
3211 decompress_threads
= g_new0(QemuThread
, thread_count
);
3212 decomp_param
= g_new0(DecompressParam
, thread_count
);
3213 qemu_mutex_init(&decomp_done_lock
);
3214 qemu_cond_init(&decomp_done_cond
);
3216 for (i
= 0; i
< thread_count
; i
++) {
3217 if (inflateInit(&decomp_param
[i
].stream
) != Z_OK
) {
3221 decomp_param
[i
].compbuf
= g_malloc0(compressBound(TARGET_PAGE_SIZE
));
3222 qemu_mutex_init(&decomp_param
[i
].mutex
);
3223 qemu_cond_init(&decomp_param
[i
].cond
);
3224 decomp_param
[i
].done
= true;
3225 decomp_param
[i
].quit
= false;
3226 qemu_thread_create(decompress_threads
+ i
, "decompress",
3227 do_data_decompress
, decomp_param
+ i
,
3228 QEMU_THREAD_JOINABLE
);
3232 compress_threads_load_cleanup();
3236 static void decompress_data_with_multi_threads(QEMUFile
*f
,
3237 void *host
, int len
)
3239 int idx
, thread_count
;
3241 thread_count
= migrate_decompress_threads();
3242 qemu_mutex_lock(&decomp_done_lock
);
3244 for (idx
= 0; idx
< thread_count
; idx
++) {
3245 if (decomp_param
[idx
].done
) {
3246 decomp_param
[idx
].done
= false;
3247 qemu_mutex_lock(&decomp_param
[idx
].mutex
);
3248 qemu_get_buffer(f
, decomp_param
[idx
].compbuf
, len
);
3249 decomp_param
[idx
].des
= host
;
3250 decomp_param
[idx
].len
= len
;
3251 qemu_cond_signal(&decomp_param
[idx
].cond
);
3252 qemu_mutex_unlock(&decomp_param
[idx
].mutex
);
3256 if (idx
< thread_count
) {
3259 qemu_cond_wait(&decomp_done_cond
, &decomp_done_lock
);
3262 qemu_mutex_unlock(&decomp_done_lock
);
3266 * ram_load_setup: Setup RAM for migration incoming side
3268 * Returns zero to indicate success and negative for error
3270 * @f: QEMUFile where to receive the data
3271 * @opaque: RAMState pointer
3273 static int ram_load_setup(QEMUFile
*f
, void *opaque
)
3275 if (compress_threads_load_setup(f
)) {
3279 xbzrle_load_setup();
3280 ramblock_recv_map_init();
3284 static int ram_load_cleanup(void *opaque
)
3287 xbzrle_load_cleanup();
3288 compress_threads_load_cleanup();
3290 RAMBLOCK_FOREACH_MIGRATABLE(rb
) {
3291 g_free(rb
->receivedmap
);
3292 rb
->receivedmap
= NULL
;
3298 * ram_postcopy_incoming_init: allocate postcopy data structures
3300 * Returns 0 for success and negative if there was one error
3302 * @mis: current migration incoming state
3304 * Allocate data structures etc needed by incoming migration with
3305 * postcopy-ram. postcopy-ram's similarly names
3306 * postcopy_ram_incoming_init does the work.
3308 int ram_postcopy_incoming_init(MigrationIncomingState
*mis
)
3310 unsigned long ram_pages
= last_ram_page();
3312 return postcopy_ram_incoming_init(mis
, ram_pages
);
3316 * ram_load_postcopy: load a page in postcopy case
3318 * Returns 0 for success or -errno in case of error
3320 * Called in postcopy mode by ram_load().
3321 * rcu_read_lock is taken prior to this being called.
3323 * @f: QEMUFile where to send the data
3325 static int ram_load_postcopy(QEMUFile
*f
)
3327 int flags
= 0, ret
= 0;
3328 bool place_needed
= false;
3329 bool matching_page_sizes
= false;
3330 MigrationIncomingState
*mis
= migration_incoming_get_current();
3331 /* Temporary page that is later 'placed' */
3332 void *postcopy_host_page
= postcopy_get_tmp_page(mis
);
3333 void *last_host
= NULL
;
3334 bool all_zero
= false;
3336 while (!ret
&& !(flags
& RAM_SAVE_FLAG_EOS
)) {
3339 void *page_buffer
= NULL
;
3340 void *place_source
= NULL
;
3341 RAMBlock
*block
= NULL
;
3344 addr
= qemu_get_be64(f
);
3347 * If qemu file error, we should stop here, and then "addr"
3350 ret
= qemu_file_get_error(f
);
3355 flags
= addr
& ~TARGET_PAGE_MASK
;
3356 addr
&= TARGET_PAGE_MASK
;
3358 trace_ram_load_postcopy_loop((uint64_t)addr
, flags
);
3359 place_needed
= false;
3360 if (flags
& (RAM_SAVE_FLAG_ZERO
| RAM_SAVE_FLAG_PAGE
)) {
3361 block
= ram_block_from_stream(f
, flags
);
3363 host
= host_from_ram_block_offset(block
, addr
);
3365 error_report("Illegal RAM offset " RAM_ADDR_FMT
, addr
);
3369 matching_page_sizes
= block
->page_size
== TARGET_PAGE_SIZE
;
3371 * Postcopy requires that we place whole host pages atomically;
3372 * these may be huge pages for RAMBlocks that are backed by
3374 * To make it atomic, the data is read into a temporary page
3375 * that's moved into place later.
3376 * The migration protocol uses, possibly smaller, target-pages
3377 * however the source ensures it always sends all the components
3378 * of a host page in order.
3380 page_buffer
= postcopy_host_page
+
3381 ((uintptr_t)host
& (block
->page_size
- 1));
3382 /* If all TP are zero then we can optimise the place */
3383 if (!((uintptr_t)host
& (block
->page_size
- 1))) {
3386 /* not the 1st TP within the HP */
3387 if (host
!= (last_host
+ TARGET_PAGE_SIZE
)) {
3388 error_report("Non-sequential target page %p/%p",
3397 * If it's the last part of a host page then we place the host
3400 place_needed
= (((uintptr_t)host
+ TARGET_PAGE_SIZE
) &
3401 (block
->page_size
- 1)) == 0;
3402 place_source
= postcopy_host_page
;
3406 switch (flags
& ~RAM_SAVE_FLAG_CONTINUE
) {
3407 case RAM_SAVE_FLAG_ZERO
:
3408 ch
= qemu_get_byte(f
);
3409 memset(page_buffer
, ch
, TARGET_PAGE_SIZE
);
3415 case RAM_SAVE_FLAG_PAGE
:
3417 if (!place_needed
|| !matching_page_sizes
) {
3418 qemu_get_buffer(f
, page_buffer
, TARGET_PAGE_SIZE
);
3420 /* Avoids the qemu_file copy during postcopy, which is
3421 * going to do a copy later; can only do it when we
3422 * do this read in one go (matching page sizes)
3424 qemu_get_buffer_in_place(f
, (uint8_t **)&place_source
,
3428 case RAM_SAVE_FLAG_EOS
:
3432 error_report("Unknown combination of migration flags: %#x"
3433 " (postcopy mode)", flags
);
3438 /* Detect for any possible file errors */
3439 if (!ret
&& qemu_file_get_error(f
)) {
3440 ret
= qemu_file_get_error(f
);
3443 if (!ret
&& place_needed
) {
3444 /* This gets called at the last target page in the host page */
3445 void *place_dest
= host
+ TARGET_PAGE_SIZE
- block
->page_size
;
3448 ret
= postcopy_place_page_zero(mis
, place_dest
,
3451 ret
= postcopy_place_page(mis
, place_dest
,
3452 place_source
, block
);
3460 static bool postcopy_is_advised(void)
3462 PostcopyState ps
= postcopy_state_get();
3463 return ps
>= POSTCOPY_INCOMING_ADVISE
&& ps
< POSTCOPY_INCOMING_END
;
3466 static bool postcopy_is_running(void)
3468 PostcopyState ps
= postcopy_state_get();
3469 return ps
>= POSTCOPY_INCOMING_LISTENING
&& ps
< POSTCOPY_INCOMING_END
;
3472 static int ram_load(QEMUFile
*f
, void *opaque
, int version_id
)
3474 int flags
= 0, ret
= 0, invalid_flags
= 0;
3475 static uint64_t seq_iter
;
3478 * If system is running in postcopy mode, page inserts to host memory must
3481 bool postcopy_running
= postcopy_is_running();
3482 /* ADVISE is earlier, it shows the source has the postcopy capability on */
3483 bool postcopy_advised
= postcopy_is_advised();
3487 if (version_id
!= 4) {
3491 if (!migrate_use_compression()) {
3492 invalid_flags
|= RAM_SAVE_FLAG_COMPRESS_PAGE
;
3494 /* This RCU critical section can be very long running.
3495 * When RCU reclaims in the code start to become numerous,
3496 * it will be necessary to reduce the granularity of this
3501 if (postcopy_running
) {
3502 ret
= ram_load_postcopy(f
);
3505 while (!postcopy_running
&& !ret
&& !(flags
& RAM_SAVE_FLAG_EOS
)) {
3506 ram_addr_t addr
, total_ram_bytes
;
3510 addr
= qemu_get_be64(f
);
3511 flags
= addr
& ~TARGET_PAGE_MASK
;
3512 addr
&= TARGET_PAGE_MASK
;
3514 if (flags
& invalid_flags
) {
3515 if (flags
& invalid_flags
& RAM_SAVE_FLAG_COMPRESS_PAGE
) {
3516 error_report("Received an unexpected compressed page");
3523 if (flags
& (RAM_SAVE_FLAG_ZERO
| RAM_SAVE_FLAG_PAGE
|
3524 RAM_SAVE_FLAG_COMPRESS_PAGE
| RAM_SAVE_FLAG_XBZRLE
)) {
3525 RAMBlock
*block
= ram_block_from_stream(f
, flags
);
3527 host
= host_from_ram_block_offset(block
, addr
);
3529 error_report("Illegal RAM offset " RAM_ADDR_FMT
, addr
);
3533 ramblock_recv_bitmap_set(block
, host
);
3534 trace_ram_load_loop(block
->idstr
, (uint64_t)addr
, flags
, host
);
3537 switch (flags
& ~RAM_SAVE_FLAG_CONTINUE
) {
3538 case RAM_SAVE_FLAG_MEM_SIZE
:
3539 /* Synchronize RAM block list */
3540 total_ram_bytes
= addr
;
3541 while (!ret
&& total_ram_bytes
) {
3546 len
= qemu_get_byte(f
);
3547 qemu_get_buffer(f
, (uint8_t *)id
, len
);
3549 length
= qemu_get_be64(f
);
3551 block
= qemu_ram_block_by_name(id
);
3552 if (block
&& !qemu_ram_is_migratable(block
)) {
3553 error_report("block %s should not be migrated !", id
);
3556 if (length
!= block
->used_length
) {
3557 Error
*local_err
= NULL
;
3559 ret
= qemu_ram_resize(block
, length
,
3562 error_report_err(local_err
);
3565 /* For postcopy we need to check hugepage sizes match */
3566 if (postcopy_advised
&&
3567 block
->page_size
!= qemu_host_page_size
) {
3568 uint64_t remote_page_size
= qemu_get_be64(f
);
3569 if (remote_page_size
!= block
->page_size
) {
3570 error_report("Mismatched RAM page size %s "
3571 "(local) %zd != %" PRId64
,
3572 id
, block
->page_size
,
3577 ram_control_load_hook(f
, RAM_CONTROL_BLOCK_REG
,
3580 error_report("Unknown ramblock \"%s\", cannot "
3581 "accept migration", id
);
3585 total_ram_bytes
-= length
;
3589 case RAM_SAVE_FLAG_ZERO
:
3590 ch
= qemu_get_byte(f
);
3591 ram_handle_compressed(host
, ch
, TARGET_PAGE_SIZE
);
3594 case RAM_SAVE_FLAG_PAGE
:
3595 qemu_get_buffer(f
, host
, TARGET_PAGE_SIZE
);
3598 case RAM_SAVE_FLAG_COMPRESS_PAGE
:
3599 len
= qemu_get_be32(f
);
3600 if (len
< 0 || len
> compressBound(TARGET_PAGE_SIZE
)) {
3601 error_report("Invalid compressed data length: %d", len
);
3605 decompress_data_with_multi_threads(f
, host
, len
);
3608 case RAM_SAVE_FLAG_XBZRLE
:
3609 if (load_xbzrle(f
, addr
, host
) < 0) {
3610 error_report("Failed to decompress XBZRLE page at "
3611 RAM_ADDR_FMT
, addr
);
3616 case RAM_SAVE_FLAG_EOS
:
3620 if (flags
& RAM_SAVE_FLAG_HOOK
) {
3621 ram_control_load_hook(f
, RAM_CONTROL_HOOK
, NULL
);
3623 error_report("Unknown combination of migration flags: %#x",
3629 ret
= qemu_file_get_error(f
);
3633 ret
|= wait_for_decompress_done();
3635 trace_ram_load_complete(ret
, seq_iter
);
3639 static bool ram_has_postcopy(void *opaque
)
3641 return migrate_postcopy_ram();
3644 /* Sync all the dirty bitmap with destination VM. */
3645 static int ram_dirty_bitmap_sync_all(MigrationState
*s
, RAMState
*rs
)
3648 QEMUFile
*file
= s
->to_dst_file
;
3649 int ramblock_count
= 0;
3651 trace_ram_dirty_bitmap_sync_start();
3653 RAMBLOCK_FOREACH_MIGRATABLE(block
) {
3654 qemu_savevm_send_recv_bitmap(file
, block
->idstr
);
3655 trace_ram_dirty_bitmap_request(block
->idstr
);
3659 trace_ram_dirty_bitmap_sync_wait();
3661 /* Wait until all the ramblocks' dirty bitmap synced */
3662 while (ramblock_count
--) {
3663 qemu_sem_wait(&s
->rp_state
.rp_sem
);
3666 trace_ram_dirty_bitmap_sync_complete();
3671 static void ram_dirty_bitmap_reload_notify(MigrationState
*s
)
3673 qemu_sem_post(&s
->rp_state
.rp_sem
);
3677 * Read the received bitmap, revert it as the initial dirty bitmap.
3678 * This is only used when the postcopy migration is paused but wants
3679 * to resume from a middle point.
3681 int ram_dirty_bitmap_reload(MigrationState
*s
, RAMBlock
*block
)
3684 QEMUFile
*file
= s
->rp_state
.from_dst_file
;
3685 unsigned long *le_bitmap
, nbits
= block
->used_length
>> TARGET_PAGE_BITS
;
3686 uint64_t local_size
= nbits
/ 8;
3687 uint64_t size
, end_mark
;
3689 trace_ram_dirty_bitmap_reload_begin(block
->idstr
);
3691 if (s
->state
!= MIGRATION_STATUS_POSTCOPY_RECOVER
) {
3692 error_report("%s: incorrect state %s", __func__
,
3693 MigrationStatus_str(s
->state
));
3698 * Note: see comments in ramblock_recv_bitmap_send() on why we
3699 * need the endianess convertion, and the paddings.
3701 local_size
= ROUND_UP(local_size
, 8);
3704 le_bitmap
= bitmap_new(nbits
+ BITS_PER_LONG
);
3706 size
= qemu_get_be64(file
);
3708 /* The size of the bitmap should match with our ramblock */
3709 if (size
!= local_size
) {
3710 error_report("%s: ramblock '%s' bitmap size mismatch "
3711 "(0x%"PRIx64
" != 0x%"PRIx64
")", __func__
,
3712 block
->idstr
, size
, local_size
);
3717 size
= qemu_get_buffer(file
, (uint8_t *)le_bitmap
, local_size
);
3718 end_mark
= qemu_get_be64(file
);
3720 ret
= qemu_file_get_error(file
);
3721 if (ret
|| size
!= local_size
) {
3722 error_report("%s: read bitmap failed for ramblock '%s': %d"
3723 " (size 0x%"PRIx64
", got: 0x%"PRIx64
")",
3724 __func__
, block
->idstr
, ret
, local_size
, size
);
3729 if (end_mark
!= RAMBLOCK_RECV_BITMAP_ENDING
) {
3730 error_report("%s: ramblock '%s' end mark incorrect: 0x%"PRIu64
,
3731 __func__
, block
->idstr
, end_mark
);
3737 * Endianess convertion. We are during postcopy (though paused).
3738 * The dirty bitmap won't change. We can directly modify it.
3740 bitmap_from_le(block
->bmap
, le_bitmap
, nbits
);
3743 * What we received is "received bitmap". Revert it as the initial
3744 * dirty bitmap for this ramblock.
3746 bitmap_complement(block
->bmap
, block
->bmap
, nbits
);
3748 trace_ram_dirty_bitmap_reload_complete(block
->idstr
);
3751 * We succeeded to sync bitmap for current ramblock. If this is
3752 * the last one to sync, we need to notify the main send thread.
3754 ram_dirty_bitmap_reload_notify(s
);
3762 static int ram_resume_prepare(MigrationState
*s
, void *opaque
)
3764 RAMState
*rs
= *(RAMState
**)opaque
;
3767 ret
= ram_dirty_bitmap_sync_all(s
, rs
);
3772 ram_state_resume_prepare(rs
, s
->to_dst_file
);
3777 static SaveVMHandlers savevm_ram_handlers
= {
3778 .save_setup
= ram_save_setup
,
3779 .save_live_iterate
= ram_save_iterate
,
3780 .save_live_complete_postcopy
= ram_save_complete
,
3781 .save_live_complete_precopy
= ram_save_complete
,
3782 .has_postcopy
= ram_has_postcopy
,
3783 .save_live_pending
= ram_save_pending
,
3784 .load_state
= ram_load
,
3785 .save_cleanup
= ram_save_cleanup
,
3786 .load_setup
= ram_load_setup
,
3787 .load_cleanup
= ram_load_cleanup
,
3788 .resume_prepare
= ram_resume_prepare
,
3791 void ram_mig_init(void)
3793 qemu_mutex_init(&XBZRLE
.lock
);
3794 register_savevm_live(NULL
, "ram", 0, 4, &savevm_ram_handlers
, &ram_state
);