migration: Abstract the number of bytes sent
[qemu.git] / migration / ram.c
1 /*
2 * QEMU System Emulator
4 * Copyright (c) 2003-2008 Fabrice Bellard
5 * Copyright (c) 2011-2015 Red Hat Inc
7 * Authors:
8 * Juan Quintela <quintela@redhat.com>
10 * Permission is hereby granted, free of charge, to any person obtaining a copy
11 * of this software and associated documentation files (the "Software"), to deal
12 * in the Software without restriction, including without limitation the rights
13 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the Software is
15 * furnished to do so, subject to the following conditions:
17 * The above copyright notice and this permission notice shall be included in
18 * all copies or substantial portions of the Software.
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26 * THE SOFTWARE.
29 #include "qemu/osdep.h"
30 #include "cpu.h"
31 #include <zlib.h>
32 #include "qemu/cutils.h"
33 #include "qemu/bitops.h"
34 #include "qemu/bitmap.h"
35 #include "qemu/main-loop.h"
36 #include "xbzrle.h"
37 #include "ram.h"
38 #include "migration.h"
39 #include "socket.h"
40 #include "migration/register.h"
41 #include "migration/misc.h"
42 #include "qemu-file.h"
43 #include "postcopy-ram.h"
44 #include "page_cache.h"
45 #include "qemu/error-report.h"
46 #include "qapi/error.h"
47 #include "qapi/qapi-events-migration.h"
48 #include "qapi/qmp/qerror.h"
49 #include "trace.h"
50 #include "exec/ram_addr.h"
51 #include "exec/target_page.h"
52 #include "qemu/rcu_queue.h"
53 #include "migration/colo.h"
54 #include "block.h"
55 #include "sysemu/sysemu.h"
56 #include "qemu/uuid.h"
57 #include "savevm.h"
59 /***********************************************************/
60 /* ram save/restore */
62 /* RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS; it
63 * worked for pages that were filled with the same char. We switched
64 * it to only search for the zero value, and renamed it to avoid
65 * confusion with RAM_SAVE_FLAG_COMPRESS_PAGE.
68 #define RAM_SAVE_FLAG_FULL 0x01 /* Obsolete, not used anymore */
69 #define RAM_SAVE_FLAG_ZERO 0x02
70 #define RAM_SAVE_FLAG_MEM_SIZE 0x04
71 #define RAM_SAVE_FLAG_PAGE 0x08
72 #define RAM_SAVE_FLAG_EOS 0x10
73 #define RAM_SAVE_FLAG_CONTINUE 0x20
74 #define RAM_SAVE_FLAG_XBZRLE 0x40
75 /* 0x80 is reserved in migration.h start with 0x100 next */
76 #define RAM_SAVE_FLAG_COMPRESS_PAGE 0x100
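/*
 * These flags travel in the low bits of the page offset word written by
 * save_page_header() below; since offsets are target-page aligned, the
 * low bits are free, e.g. a zero page is announced roughly as
 *
 *     qemu_put_be64(f, offset | RAM_SAVE_FLAG_ZERO);
 */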
78 static inline bool is_zero_range(uint8_t *p, uint64_t size)
80 return buffer_is_zero(p, size);
83 XBZRLECacheStats xbzrle_counters;
85 /* struct contains the XBZRLE cache and a static page
86 used for compression */
87 static struct {
88 /* buffer used for XBZRLE encoding */
89 uint8_t *encoded_buf;
90 /* buffer for storing page content */
91 uint8_t *current_buf;
92 /* Cache for XBZRLE, Protected by lock. */
93 PageCache *cache;
94 QemuMutex lock;
95 /* it will store a page full of zeros */
96 uint8_t *zero_target_page;
97 /* buffer used for XBZRLE decoding */
98 uint8_t *decoded_buf;
99 } XBZRLE;
101 static void XBZRLE_cache_lock(void)
103 if (migrate_use_xbzrle())
104 qemu_mutex_lock(&XBZRLE.lock);
107 static void XBZRLE_cache_unlock(void)
109 if (migrate_use_xbzrle())
110 qemu_mutex_unlock(&XBZRLE.lock);
114 * xbzrle_cache_resize: resize the xbzrle cache
116 * This function is called from qmp_migrate_set_cache_size in the main
117 * thread, possibly while a migration is in progress. A running
118 * migration may be using the cache and might finish during this call,
119 * hence changes to the cache are protected by XBZRLE.lock.
121 * Returns 0 for success or -1 for error
123 * @new_size: new cache size
124 * @errp: set *errp with the reason if the check failed
126 int xbzrle_cache_resize(int64_t new_size, Error **errp)
128 PageCache *new_cache;
129 int64_t ret = 0;
131 /* Check for truncation */
132 if (new_size != (size_t)new_size) {
133 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
134 "exceeding address space");
135 return -1;
138 if (new_size == migrate_xbzrle_cache_size()) {
139 /* nothing to do */
140 return 0;
143 XBZRLE_cache_lock();
145 if (XBZRLE.cache != NULL) {
146 new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);
147 if (!new_cache) {
148 ret = -1;
149 goto out;
152 cache_fini(XBZRLE.cache);
153 XBZRLE.cache = new_cache;
155 out:
156 XBZRLE_cache_unlock();
157 return ret;
160 /* Should be holding either ram_list.mutex, or the RCU lock. */
161 #define RAMBLOCK_FOREACH_MIGRATABLE(block) \
162 INTERNAL_RAMBLOCK_FOREACH(block) \
163 if (!qemu_ram_is_migratable(block)) {} else
165 #undef RAMBLOCK_FOREACH
167 static void ramblock_recv_map_init(void)
169 RAMBlock *rb;
171 RAMBLOCK_FOREACH_MIGRATABLE(rb) {
172 assert(!rb->receivedmap);
173 rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
177 int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
179 return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
180 rb->receivedmap);
183 bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset)
185 return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
188 void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
190 set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
193 void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
194 size_t nr)
196 bitmap_set_atomic(rb->receivedmap,
197 ramblock_recv_bitmap_offset(host_addr, rb),
198 nr);
201 #define RAMBLOCK_RECV_BITMAP_ENDING (0x0123456789abcdefULL)
204 * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
206 * Returns the number of bytes sent (>0) on success, or <0 on error.
208 int64_t ramblock_recv_bitmap_send(QEMUFile *file,
209 const char *block_name)
211 RAMBlock *block = qemu_ram_block_by_name(block_name);
212 unsigned long *le_bitmap, nbits;
213 uint64_t size;
215 if (!block) {
216 error_report("%s: invalid block name: %s", __func__, block_name);
217 return -1;
220 nbits = block->used_length >> TARGET_PAGE_BITS;
223 * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
224 * machines we may need 4 more bytes for padding (see below
225 * comment). So extend it a bit beforehand.
227 le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
230 * Always use little endian when sending the bitmap. This is
231 * required when source and destination VMs are not using the
232 * same endianness. (Note: big endian won't work.)
234 bitmap_to_le(le_bitmap, block->receivedmap, nbits);
236 /* Size of the bitmap, in bytes */
237 size = nbits / 8;
240 * size is always aligned to 8 bytes for 64bit machines, but it
241 * may not be true for 32bit machines. We need this padding to
242 * make sure the migration can survive even between 32bit and
243 * 64bit machines.
245 size = ROUND_UP(size, 8);
247 qemu_put_be64(file, size);
248 qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
250 * Mark as an end, in case the middle part is screwed up due to
251 * some "mysterious" reason.
253 qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
254 qemu_fflush(file);
256 g_free(le_bitmap);
258 if (qemu_file_get_error(file)) {
259 return qemu_file_get_error(file);
262 return size + sizeof(size);
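/*
 * Sketch of what ramblock_recv_bitmap_send() puts on the wire:
 *
 *     be64:       size (bitmap length in bytes, rounded up to 8)
 *     size bytes: the bitmap, little endian, one bit per target page
 *     be64:       RAMBLOCK_RECV_BITMAP_ENDING marker
 *
 * The return value counts the bitmap plus the leading length field; the
 * trailing marker is not included.
 */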
266 * An outstanding page request, on the source, having been received
267 * and queued
269 struct RAMSrcPageRequest {
270 RAMBlock *rb;
271 hwaddr offset;
272 hwaddr len;
274 QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
277 /* State of RAM for migration */
278 struct RAMState {
279 /* QEMUFile used for this migration */
280 QEMUFile *f;
281 /* Last block that we have visited searching for dirty pages */
282 RAMBlock *last_seen_block;
283 /* Last block from where we have sent data */
284 RAMBlock *last_sent_block;
285 /* Last dirty target page we have sent */
286 ram_addr_t last_page;
287 /* last ram version we have seen */
288 uint32_t last_version;
289 /* We are in the first round */
290 bool ram_bulk_stage;
291 /* How many times we have dirty too many pages */
292 int dirty_rate_high_cnt;
293 /* these variables are used for bitmap sync */
294 /* last time we did a full bitmap_sync */
295 int64_t time_last_bitmap_sync;
296 /* bytes transferred at start_time */
297 uint64_t bytes_xfer_prev;
298 /* number of dirty pages since start_time */
299 uint64_t num_dirty_pages_period;
300 /* xbzrle misses since the beginning of the period */
301 uint64_t xbzrle_cache_miss_prev;
302 /* number of iterations at the beginning of period */
303 uint64_t iterations_prev;
304 /* Iterations since start */
305 uint64_t iterations;
306 /* number of dirty bits in the bitmap */
307 uint64_t migration_dirty_pages;
308 /* protects modification of the bitmap */
309 QemuMutex bitmap_mutex;
310 /* The RAMBlock used in the last src_page_requests */
311 RAMBlock *last_req_rb;
312 /* Queue of outstanding page requests from the destination */
313 QemuMutex src_page_req_mutex;
314 QSIMPLEQ_HEAD(src_page_requests, RAMSrcPageRequest) src_page_requests;
316 typedef struct RAMState RAMState;
318 static RAMState *ram_state;
320 uint64_t ram_bytes_remaining(void)
322 return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) : 0;
326 MigrationStats ram_counters;
328 /* used by the search for pages to send */
329 struct PageSearchStatus {
330 /* Current block being searched */
331 RAMBlock *block;
332 /* Current page to search from */
333 unsigned long page;
334 /* Set once we wrap around */
335 bool complete_round;
337 typedef struct PageSearchStatus PageSearchStatus;
339 struct CompressParam {
340 bool done;
341 bool quit;
342 QEMUFile *file;
343 QemuMutex mutex;
344 QemuCond cond;
345 RAMBlock *block;
346 ram_addr_t offset;
348 /* internally used fields */
349 z_stream stream;
350 uint8_t *originbuf;
352 typedef struct CompressParam CompressParam;
354 struct DecompressParam {
355 bool done;
356 bool quit;
357 QemuMutex mutex;
358 QemuCond cond;
359 void *des;
360 uint8_t *compbuf;
361 int len;
362 z_stream stream;
364 typedef struct DecompressParam DecompressParam;
366 static CompressParam *comp_param;
367 static QemuThread *compress_threads;
368 /* comp_done_cond is used to wake up the migration thread when
369 * one of the compression threads has finished the compression.
370 * comp_done_lock is used together with comp_done_cond.
372 static QemuMutex comp_done_lock;
373 static QemuCond comp_done_cond;
374 /* The empty QEMUFileOps will be used by file in CompressParam */
375 static const QEMUFileOps empty_ops = { };
377 static QEMUFile *decomp_file;
378 static DecompressParam *decomp_param;
379 static QemuThread *decompress_threads;
380 static QemuMutex decomp_done_lock;
381 static QemuCond decomp_done_cond;
383 static int do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
384 ram_addr_t offset, uint8_t *source_buf);
386 static void *do_data_compress(void *opaque)
388 CompressParam *param = opaque;
389 RAMBlock *block;
390 ram_addr_t offset;
392 qemu_mutex_lock(&param->mutex);
393 while (!param->quit) {
394 if (param->block) {
395 block = param->block;
396 offset = param->offset;
397 param->block = NULL;
398 qemu_mutex_unlock(&param->mutex);
400 do_compress_ram_page(param->file, &param->stream, block, offset,
401 param->originbuf);
403 qemu_mutex_lock(&comp_done_lock);
404 param->done = true;
405 qemu_cond_signal(&comp_done_cond);
406 qemu_mutex_unlock(&comp_done_lock);
408 qemu_mutex_lock(&param->mutex);
409 } else {
410 qemu_cond_wait(&param->cond, &param->mutex);
413 qemu_mutex_unlock(&param->mutex);
415 return NULL;
418 static inline void terminate_compression_threads(void)
420 int idx, thread_count;
422 thread_count = migrate_compress_threads();
424 for (idx = 0; idx < thread_count; idx++) {
425 qemu_mutex_lock(&comp_param[idx].mutex);
426 comp_param[idx].quit = true;
427 qemu_cond_signal(&comp_param[idx].cond);
428 qemu_mutex_unlock(&comp_param[idx].mutex);
432 static void compress_threads_save_cleanup(void)
434 int i, thread_count;
436 if (!migrate_use_compression()) {
437 return;
439 terminate_compression_threads();
440 thread_count = migrate_compress_threads();
441 for (i = 0; i < thread_count; i++) {
443 * we use it as an indicator of whether the thread is
444 * properly initialized or not
446 if (!comp_param[i].file) {
447 break;
449 qemu_thread_join(compress_threads + i);
450 qemu_mutex_destroy(&comp_param[i].mutex);
451 qemu_cond_destroy(&comp_param[i].cond);
452 deflateEnd(&comp_param[i].stream);
453 g_free(comp_param[i].originbuf);
454 qemu_fclose(comp_param[i].file);
455 comp_param[i].file = NULL;
457 qemu_mutex_destroy(&comp_done_lock);
458 qemu_cond_destroy(&comp_done_cond);
459 g_free(compress_threads);
460 g_free(comp_param);
461 compress_threads = NULL;
462 comp_param = NULL;
465 static int compress_threads_save_setup(void)
467 int i, thread_count;
469 if (!migrate_use_compression()) {
470 return 0;
472 thread_count = migrate_compress_threads();
473 compress_threads = g_new0(QemuThread, thread_count);
474 comp_param = g_new0(CompressParam, thread_count);
475 qemu_cond_init(&comp_done_cond);
476 qemu_mutex_init(&comp_done_lock);
477 for (i = 0; i < thread_count; i++) {
478 comp_param[i].originbuf = g_try_malloc(TARGET_PAGE_SIZE);
479 if (!comp_param[i].originbuf) {
480 goto exit;
483 if (deflateInit(&comp_param[i].stream,
484 migrate_compress_level()) != Z_OK) {
485 g_free(comp_param[i].originbuf);
486 goto exit;
489 /* comp_param[i].file is just used as a dummy buffer to save data,
490 * set its ops to empty.
492 comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
493 comp_param[i].done = true;
494 comp_param[i].quit = false;
495 qemu_mutex_init(&comp_param[i].mutex);
496 qemu_cond_init(&comp_param[i].cond);
497 qemu_thread_create(compress_threads + i, "compress",
498 do_data_compress, comp_param + i,
499 QEMU_THREAD_JOINABLE);
501 return 0;
503 exit:
504 compress_threads_save_cleanup();
505 return -1;
508 /* Multiple fd's */
510 #define MULTIFD_MAGIC 0x11223344U
511 #define MULTIFD_VERSION 1
513 typedef struct {
514 uint32_t magic;
515 uint32_t version;
516 unsigned char uuid[16]; /* QemuUUID */
517 uint8_t id;
518 } __attribute__((packed)) MultiFDInit_t;
520 typedef struct {
521 uint32_t magic;
522 uint32_t version;
523 uint32_t flags;
524 uint32_t size;
525 uint32_t used;
526 uint64_t packet_num;
527 char ramblock[256];
528 uint64_t offset[];
529 } __attribute__((packed)) MultiFDPacket_t;
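/*
 * MultiFDPacket_t describes one batch of pages for a multifd channel.
 * All multi-byte fields are kept big endian (filled by
 * multifd_send_fill_packet() and parsed by multifd_recv_unfill_packet()
 * below), with up to 'used' be64 page offsets in the trailing offset[]
 * array.
 */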
531 typedef struct {
532 /* number of used pages */
533 uint32_t used;
534 /* number of allocated pages */
535 uint32_t allocated;
536 /* global number of generated multifd packets */
537 uint64_t packet_num;
538 /* offset of each page */
539 ram_addr_t *offset;
540 /* pointer to each page */
541 struct iovec *iov;
542 RAMBlock *block;
543 } MultiFDPages_t;
545 typedef struct {
546 /* these fields are not changed once the thread is created */
547 /* channel number */
548 uint8_t id;
549 /* channel thread name */
550 char *name;
551 /* channel thread id */
552 QemuThread thread;
553 /* communication channel */
554 QIOChannel *c;
555 /* sem where to wait for more work */
556 QemuSemaphore sem;
557 /* this mutex protects the following parameters */
558 QemuMutex mutex;
559 /* is this channel thread running */
560 bool running;
561 /* should this thread finish */
562 bool quit;
563 /* array of pages to send */
564 MultiFDPages_t *pages;
565 /* packet allocated len */
566 uint32_t packet_len;
567 /* pointer to the packet */
568 MultiFDPacket_t *packet;
569 /* multifd flags for each packet */
570 uint32_t flags;
571 /* global number of generated multifd packets */
572 uint64_t packet_num;
573 } MultiFDSendParams;
575 typedef struct {
576 /* these fields are not changed once the thread is created */
577 /* channel number */
578 uint8_t id;
579 /* channel thread name */
580 char *name;
581 /* channel thread id */
582 QemuThread thread;
583 /* communication channel */
584 QIOChannel *c;
585 /* sem where to wait for more work */
586 QemuSemaphore sem;
587 /* this mutex protects the following parameters */
588 QemuMutex mutex;
589 /* is this channel thread running */
590 bool running;
591 /* should this thread finish */
592 bool quit;
593 /* array of pages to receive */
594 MultiFDPages_t *pages;
595 /* packet allocated len */
596 uint32_t packet_len;
597 /* pointer to the packet */
598 MultiFDPacket_t *packet;
599 /* multifd flags for each packet */
600 uint32_t flags;
601 /* global number of generated multifd packets */
602 uint64_t packet_num;
603 } MultiFDRecvParams;
605 static int multifd_send_initial_packet(MultiFDSendParams *p, Error **errp)
607 MultiFDInit_t msg;
608 int ret;
610 msg.magic = cpu_to_be32(MULTIFD_MAGIC);
611 msg.version = cpu_to_be32(MULTIFD_VERSION);
612 msg.id = p->id;
613 memcpy(msg.uuid, &qemu_uuid.data, sizeof(msg.uuid));
615 ret = qio_channel_write_all(p->c, (char *)&msg, sizeof(msg), errp);
616 if (ret != 0) {
617 return -1;
619 return 0;
622 static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
624 MultiFDInit_t msg;
625 int ret;
627 ret = qio_channel_read_all(c, (char *)&msg, sizeof(msg), errp);
628 if (ret != 0) {
629 return -1;
632 be32_to_cpus(&msg.magic);
633 be32_to_cpus(&msg.version);
635 if (msg.magic != MULTIFD_MAGIC) {
636 error_setg(errp, "multifd: received packet magic %x "
637 "expected %x", msg.magic, MULTIFD_MAGIC);
638 return -1;
641 if (msg.version != MULTIFD_VERSION) {
642 error_setg(errp, "multifd: received packet version %d "
643 "expected %d", msg.version, MULTIFD_VERSION);
644 return -1;
647 if (memcmp(msg.uuid, &qemu_uuid, sizeof(qemu_uuid))) {
648 char *uuid = qemu_uuid_unparse_strdup(&qemu_uuid);
649 char *msg_uuid = qemu_uuid_unparse_strdup((const QemuUUID *)msg.uuid);
651 error_setg(errp, "multifd: received uuid '%s' and expected "
652 "uuid '%s' for channel %hhd", msg_uuid, uuid, msg.id);
653 g_free(uuid);
654 g_free(msg_uuid);
655 return -1;
658 if (msg.id > migrate_multifd_channels()) {
659 error_setg(errp, "multifd: received channel id %d "
660 "expected at most %d", msg.id, migrate_multifd_channels());
661 return -1;
664 return msg.id;
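/*
 * Every multifd channel thus starts with a single MultiFDInit_t
 * handshake carrying the sender's magic, version, VM uuid and channel
 * id; the id returned here is later used to pick the matching
 * MultiFDRecvParams slot in multifd_recv_new_channel().
 */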
667 static MultiFDPages_t *multifd_pages_init(size_t size)
669 MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1);
671 pages->allocated = size;
672 pages->iov = g_new0(struct iovec, size);
673 pages->offset = g_new0(ram_addr_t, size);
675 return pages;
678 static void multifd_pages_clear(MultiFDPages_t *pages)
680 pages->used = 0;
681 pages->allocated = 0;
682 pages->packet_num = 0;
683 pages->block = NULL;
684 g_free(pages->iov);
685 pages->iov = NULL;
686 g_free(pages->offset);
687 pages->offset = NULL;
688 g_free(pages);
691 static void multifd_send_fill_packet(MultiFDSendParams *p)
693 MultiFDPacket_t *packet = p->packet;
694 int i;
696 packet->magic = cpu_to_be32(MULTIFD_MAGIC);
697 packet->version = cpu_to_be32(MULTIFD_VERSION);
698 packet->flags = cpu_to_be32(p->flags);
699 packet->size = cpu_to_be32(migrate_multifd_page_count());
700 packet->used = cpu_to_be32(p->pages->used);
701 packet->packet_num = cpu_to_be64(p->packet_num);
703 if (p->pages->block) {
704 strncpy(packet->ramblock, p->pages->block->idstr, 256);
707 for (i = 0; i < p->pages->used; i++) {
708 packet->offset[i] = cpu_to_be64(p->pages->offset[i]);
712 static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
714 MultiFDPacket_t *packet = p->packet;
715 RAMBlock *block;
716 int i;
718 /* ToDo: We can't use it until we have received a message */
719 return 0;
721 be32_to_cpus(&packet->magic);
722 if (packet->magic != MULTIFD_MAGIC) {
723 error_setg(errp, "multifd: received packet "
724 "magic %x and expected magic %x",
725 packet->magic, MULTIFD_MAGIC);
726 return -1;
729 be32_to_cpus(&packet->version);
730 if (packet->version != MULTIFD_VERSION) {
731 error_setg(errp, "multifd: received packet "
732 "version %d and expected version %d",
733 packet->version, MULTIFD_VERSION);
734 return -1;
737 p->flags = be32_to_cpu(packet->flags);
739 be32_to_cpus(&packet->size);
740 if (packet->size > migrate_multifd_page_count()) {
741 error_setg(errp, "multifd: received packet "
742 "with size %d and expected maximum size %d",
743 packet->size, migrate_multifd_page_count());
744 return -1;
747 p->pages->used = be32_to_cpu(packet->used);
748 if (p->pages->used > packet->size) {
749 error_setg(errp, "multifd: received packet "
750 "with size %d and expected maximum size %d",
751 p->pages->used, packet->size);
752 return -1;
755 p->packet_num = be64_to_cpu(packet->packet_num);
757 if (p->pages->used) {
758 /* make sure that ramblock is 0 terminated */
759 packet->ramblock[255] = 0;
760 block = qemu_ram_block_by_name(packet->ramblock);
761 if (!block) {
762 error_setg(errp, "multifd: unknown ram block %s",
763 packet->ramblock);
764 return -1;
768 for (i = 0; i < p->pages->used; i++) {
769 ram_addr_t offset = be64_to_cpu(packet->offset[i]);
771 if (offset > (block->used_length - TARGET_PAGE_SIZE)) {
772 error_setg(errp, "multifd: offset too long " RAM_ADDR_FMT
773 " (max " RAM_ADDR_FMT ")",
774 offset, block->max_length);
775 return -1;
777 p->pages->iov[i].iov_base = block->host + offset;
778 p->pages->iov[i].iov_len = TARGET_PAGE_SIZE;
781 return 0;
784 struct {
785 MultiFDSendParams *params;
786 /* number of created threads */
787 int count;
788 /* array of pages to send */
789 MultiFDPages_t *pages;
790 } *multifd_send_state;
792 static void multifd_send_terminate_threads(Error *err)
794 int i;
796 if (err) {
797 MigrationState *s = migrate_get_current();
798 migrate_set_error(s, err);
799 if (s->state == MIGRATION_STATUS_SETUP ||
800 s->state == MIGRATION_STATUS_PRE_SWITCHOVER ||
801 s->state == MIGRATION_STATUS_DEVICE ||
802 s->state == MIGRATION_STATUS_ACTIVE) {
803 migrate_set_state(&s->state, s->state,
804 MIGRATION_STATUS_FAILED);
808 for (i = 0; i < migrate_multifd_channels(); i++) {
809 MultiFDSendParams *p = &multifd_send_state->params[i];
811 qemu_mutex_lock(&p->mutex);
812 p->quit = true;
813 qemu_sem_post(&p->sem);
814 qemu_mutex_unlock(&p->mutex);
818 int multifd_save_cleanup(Error **errp)
820 int i;
821 int ret = 0;
823 if (!migrate_use_multifd()) {
824 return 0;
826 multifd_send_terminate_threads(NULL);
827 for (i = 0; i < migrate_multifd_channels(); i++) {
828 MultiFDSendParams *p = &multifd_send_state->params[i];
830 if (p->running) {
831 qemu_thread_join(&p->thread);
833 socket_send_channel_destroy(p->c);
834 p->c = NULL;
835 qemu_mutex_destroy(&p->mutex);
836 qemu_sem_destroy(&p->sem);
837 g_free(p->name);
838 p->name = NULL;
839 multifd_pages_clear(p->pages);
840 p->pages = NULL;
841 p->packet_len = 0;
842 g_free(p->packet);
843 p->packet = NULL;
845 g_free(multifd_send_state->params);
846 multifd_send_state->params = NULL;
847 multifd_pages_clear(multifd_send_state->pages);
848 multifd_send_state->pages = NULL;
849 g_free(multifd_send_state);
850 multifd_send_state = NULL;
851 return ret;
854 static void *multifd_send_thread(void *opaque)
856 MultiFDSendParams *p = opaque;
857 Error *local_err = NULL;
859 if (multifd_send_initial_packet(p, &local_err) < 0) {
860 goto out;
863 while (true) {
864 qemu_mutex_lock(&p->mutex);
865 multifd_send_fill_packet(p);
866 if (p->quit) {
867 qemu_mutex_unlock(&p->mutex);
868 break;
870 qemu_mutex_unlock(&p->mutex);
871 qemu_sem_wait(&p->sem);
874 out:
875 if (local_err) {
876 multifd_send_terminate_threads(local_err);
879 qemu_mutex_lock(&p->mutex);
880 p->running = false;
881 qemu_mutex_unlock(&p->mutex);
883 return NULL;
886 static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque)
888 MultiFDSendParams *p = opaque;
889 QIOChannel *sioc = QIO_CHANNEL(qio_task_get_source(task));
890 Error *local_err = NULL;
892 if (qio_task_propagate_error(task, &local_err)) {
893 if (multifd_save_cleanup(&local_err) != 0) {
894 migrate_set_error(migrate_get_current(), local_err);
896 } else {
897 p->c = QIO_CHANNEL(sioc);
898 qio_channel_set_delay(p->c, false);
899 p->running = true;
900 qemu_thread_create(&p->thread, p->name, multifd_send_thread, p,
901 QEMU_THREAD_JOINABLE);
903 atomic_inc(&multifd_send_state->count);
907 int multifd_save_setup(void)
909 int thread_count;
910 uint32_t page_count = migrate_multifd_page_count();
911 uint8_t i;
913 if (!migrate_use_multifd()) {
914 return 0;
916 thread_count = migrate_multifd_channels();
917 multifd_send_state = g_malloc0(sizeof(*multifd_send_state));
918 multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);
919 atomic_set(&multifd_send_state->count, 0);
920 multifd_send_state->pages = multifd_pages_init(page_count);
922 for (i = 0; i < thread_count; i++) {
923 MultiFDSendParams *p = &multifd_send_state->params[i];
925 qemu_mutex_init(&p->mutex);
926 qemu_sem_init(&p->sem, 0);
927 p->quit = false;
928 p->id = i;
929 p->pages = multifd_pages_init(page_count);
930 p->packet_len = sizeof(MultiFDPacket_t)
931 + sizeof(ram_addr_t) * page_count;
932 p->packet = g_malloc0(p->packet_len);
933 p->name = g_strdup_printf("multifdsend_%d", i);
934 socket_send_channel_create(multifd_new_send_channel_async, p);
936 return 0;
939 struct {
940 MultiFDRecvParams *params;
941 /* number of created threads */
942 int count;
943 } *multifd_recv_state;
945 static void multifd_recv_terminate_threads(Error *err)
947 int i;
949 if (err) {
950 MigrationState *s = migrate_get_current();
951 migrate_set_error(s, err);
952 if (s->state == MIGRATION_STATUS_SETUP ||
953 s->state == MIGRATION_STATUS_ACTIVE) {
954 migrate_set_state(&s->state, s->state,
955 MIGRATION_STATUS_FAILED);
959 for (i = 0; i < migrate_multifd_channels(); i++) {
960 MultiFDRecvParams *p = &multifd_recv_state->params[i];
962 qemu_mutex_lock(&p->mutex);
963 p->quit = true;
964 qemu_sem_post(&p->sem);
965 qemu_mutex_unlock(&p->mutex);
969 int multifd_load_cleanup(Error **errp)
971 int i;
972 int ret = 0;
974 if (!migrate_use_multifd()) {
975 return 0;
977 multifd_recv_terminate_threads(NULL);
978 for (i = 0; i < migrate_multifd_channels(); i++) {
979 MultiFDRecvParams *p = &multifd_recv_state->params[i];
981 if (p->running) {
982 qemu_thread_join(&p->thread);
984 object_unref(OBJECT(p->c));
985 p->c = NULL;
986 qemu_mutex_destroy(&p->mutex);
987 qemu_sem_destroy(&p->sem);
988 g_free(p->name);
989 p->name = NULL;
990 multifd_pages_clear(p->pages);
991 p->pages = NULL;
992 p->packet_len = 0;
993 g_free(p->packet);
994 p->packet = NULL;
996 g_free(multifd_recv_state->params);
997 multifd_recv_state->params = NULL;
998 g_free(multifd_recv_state);
999 multifd_recv_state = NULL;
1001 return ret;
1004 static void *multifd_recv_thread(void *opaque)
1006 MultiFDRecvParams *p = opaque;
1007 Error *local_err = NULL;
1008 int ret;
1010 while (true) {
1011 qemu_mutex_lock(&p->mutex);
1012 if (false) {
1013 /* ToDo: Packet reception goes here */
1015 ret = multifd_recv_unfill_packet(p, &local_err);
1016 qemu_mutex_unlock(&p->mutex);
1017 if (ret) {
1018 break;
1020 } else if (p->quit) {
1021 qemu_mutex_unlock(&p->mutex);
1022 break;
1024 qemu_mutex_unlock(&p->mutex);
1025 qemu_sem_wait(&p->sem);
1028 qemu_mutex_lock(&p->mutex);
1029 p->running = false;
1030 qemu_mutex_unlock(&p->mutex);
1032 return NULL;
1035 int multifd_load_setup(void)
1037 int thread_count;
1038 uint32_t page_count = migrate_multifd_page_count();
1039 uint8_t i;
1041 if (!migrate_use_multifd()) {
1042 return 0;
1044 thread_count = migrate_multifd_channels();
1045 multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state));
1046 multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count);
1047 atomic_set(&multifd_recv_state->count, 0);
1049 for (i = 0; i < thread_count; i++) {
1050 MultiFDRecvParams *p = &multifd_recv_state->params[i];
1052 qemu_mutex_init(&p->mutex);
1053 qemu_sem_init(&p->sem, 0);
1054 p->quit = false;
1055 p->id = i;
1056 p->pages = multifd_pages_init(page_count);
1057 p->packet_len = sizeof(MultiFDPacket_t)
1058 + sizeof(ram_addr_t) * page_count;
1059 p->packet = g_malloc0(p->packet_len);
1060 p->name = g_strdup_printf("multifdrecv_%d", i);
1062 return 0;
1065 bool multifd_recv_all_channels_created(void)
1067 int thread_count = migrate_multifd_channels();
1069 if (!migrate_use_multifd()) {
1070 return true;
1073 return thread_count == atomic_read(&multifd_recv_state->count);
1076 void multifd_recv_new_channel(QIOChannel *ioc)
1078 MultiFDRecvParams *p;
1079 Error *local_err = NULL;
1080 int id;
1082 id = multifd_recv_initial_packet(ioc, &local_err);
1083 if (id < 0) {
1084 multifd_recv_terminate_threads(local_err);
1085 return;
1088 p = &multifd_recv_state->params[id];
1089 if (p->c != NULL) {
1090 error_setg(&local_err, "multifd: received id '%d' already setup'",
1091 id);
1092 multifd_recv_terminate_threads(local_err);
1093 return;
1095 p->c = ioc;
1096 object_ref(OBJECT(ioc));
1098 p->running = true;
1099 qemu_thread_create(&p->thread, p->name, multifd_recv_thread, p,
1100 QEMU_THREAD_JOINABLE);
1101 atomic_inc(&multifd_recv_state->count);
1102 if (multifd_recv_state->count == migrate_multifd_channels()) {
1103 migration_incoming_process();
1108 * save_page_header: write page header to wire
1110 * If this is not a continuation of the last sent block, it also writes the block identification
1112 * Returns the number of bytes written
1114 * @f: QEMUFile where to send the data
1115 * @block: block that contains the page we want to send
1116 * @offset: offset inside the block for the page;
1117 * the lower bits contain flags
1119 static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
1120 ram_addr_t offset)
1122 size_t size, len;
1124 if (block == rs->last_sent_block) {
1125 offset |= RAM_SAVE_FLAG_CONTINUE;
1127 qemu_put_be64(f, offset);
1128 size = 8;
1130 if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
1131 len = strlen(block->idstr);
1132 qemu_put_byte(f, len);
1133 qemu_put_buffer(f, (uint8_t *)block->idstr, len);
1134 size += 1 + len;
1135 rs->last_sent_block = block;
1137 return size;
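/*
 * Wire format produced by save_page_header():
 *
 *     be64: offset | flags                      (always)
 *     u8:   strlen(block->idstr)                (only without CONTINUE)
 *     N:    block->idstr, not NUL terminated    (only without CONTINUE)
 *
 * so the returned size is either 8 or 8 + 1 + len.
 */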
1141 * mig_throttle_guest_down: throttle down the guest
1143 * Reduce the amount of guest CPU execution to hopefully slow down memory
1144 * writes. If guest dirty memory rate is reduced below the rate at
1145 * which we can transfer pages to the destination then we should be
1146 * able to complete migration. Some workloads dirty memory way too
1147 * fast and will not effectively converge, even with auto-converge.
1149 static void mig_throttle_guest_down(void)
1151 MigrationState *s = migrate_get_current();
1152 uint64_t pct_initial = s->parameters.cpu_throttle_initial;
1153 uint64_t pct_icrement = s->parameters.cpu_throttle_increment;
1155 /* We have not started throttling yet. Let's start it. */
1156 if (!cpu_throttle_active()) {
1157 cpu_throttle_set(pct_initial);
1158 } else {
1159 /* Throttling already on, just increase the rate */
1160 cpu_throttle_set(cpu_throttle_get_percentage() + pct_icrement);
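/*
 * Throttling therefore starts at cpu_throttle_initial percent of guest
 * CPU time and grows by cpu_throttle_increment on each further call
 * while already active.
 */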
1165 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
1167 * @rs: current RAM state
1168 * @current_addr: address for the zero page
1170 * Update the xbzrle cache to reflect a page that's been sent as all 0.
1171 * The important thing is that a stale (not-yet-0'd) page be replaced
1172 * by the new data.
1173 * As a bonus, if the page wasn't in the cache it gets added so that
1174 * when a small write is made into the 0'd page it gets XBZRLE sent.
1176 static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
1178 if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
1179 return;
1182 /* We don't care if this fails to allocate a new cache page
1183 * as long as it updated an old one */
1184 cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
1185 ram_counters.dirty_sync_count);
1188 #define ENCODING_FLAG_XBZRLE 0x1
1191 * save_xbzrle_page: compress and send current page
1193 * Returns: 1 means that we wrote the page
1194 * 0 means that page is identical to the one already sent
1195 * -1 means that xbzrle would be longer than normal
1197 * @rs: current RAM state
1198 * @current_data: pointer to the address of the page contents
1199 * @current_addr: addr of the page
1200 * @block: block that contains the page we want to send
1201 * @offset: offset inside the block for the page
1202 * @last_stage: if we are at the completion stage
1204 static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
1205 ram_addr_t current_addr, RAMBlock *block,
1206 ram_addr_t offset, bool last_stage)
1208 int encoded_len = 0, bytes_xbzrle;
1209 uint8_t *prev_cached_page;
1211 if (!cache_is_cached(XBZRLE.cache, current_addr,
1212 ram_counters.dirty_sync_count)) {
1213 xbzrle_counters.cache_miss++;
1214 if (!last_stage) {
1215 if (cache_insert(XBZRLE.cache, current_addr, *current_data,
1216 ram_counters.dirty_sync_count) == -1) {
1217 return -1;
1218 } else {
1219 /* update *current_data when the page has been
1220 inserted into cache */
1221 *current_data = get_cached_data(XBZRLE.cache, current_addr);
1224 return -1;
1227 prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
1229 /* save current buffer into memory */
1230 memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);
1232 /* XBZRLE encoding (if there is no overflow) */
1233 encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
1234 TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
1235 TARGET_PAGE_SIZE);
1236 if (encoded_len == 0) {
1237 trace_save_xbzrle_page_skipping();
1238 return 0;
1239 } else if (encoded_len == -1) {
1240 trace_save_xbzrle_page_overflow();
1241 xbzrle_counters.overflow++;
1242 /* update data in the cache */
1243 if (!last_stage) {
1244 memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
1245 *current_data = prev_cached_page;
1247 return -1;
1250 /* we need to update the data in the cache, in order to get the same data */
1251 if (!last_stage) {
1252 memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
1255 /* Send XBZRLE based compressed page */
1256 bytes_xbzrle = save_page_header(rs, rs->f, block,
1257 offset | RAM_SAVE_FLAG_XBZRLE);
1258 qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE);
1259 qemu_put_be16(rs->f, encoded_len);
1260 qemu_put_buffer(rs->f, XBZRLE.encoded_buf, encoded_len);
1261 bytes_xbzrle += encoded_len + 1 + 2;
1262 xbzrle_counters.pages++;
1263 xbzrle_counters.bytes += bytes_xbzrle;
1264 ram_counters.transferred += bytes_xbzrle;
1266 return 1;
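/*
 * An XBZRLE record as emitted above is:
 *
 *     page header with RAM_SAVE_FLAG_XBZRLE set
 *     u8:   ENCODING_FLAG_XBZRLE
 *     be16: encoded_len
 *     encoded_len bytes of encoded delta
 *
 * which is why bytes_xbzrle is incremented by encoded_len + 1 + 2.
 */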
1270 * migration_bitmap_find_dirty: find the next dirty page from start
1272 * Called with rcu_read_lock() to protect migration_bitmap
1274 * Returns the byte offset within memory region of the start of a dirty page
1276 * @rs: current RAM state
1277 * @rb: RAMBlock where to search for dirty pages
1278 * @start: page where we start the search
1280 static inline
1281 unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
1282 unsigned long start)
1284 unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
1285 unsigned long *bitmap = rb->bmap;
1286 unsigned long next;
1288 if (!qemu_ram_is_migratable(rb)) {
1289 return size;
1292 if (rs->ram_bulk_stage && start > 0) {
1293 next = start + 1;
1294 } else {
1295 next = find_next_bit(bitmap, size, start);
1298 return next;
1301 static inline bool migration_bitmap_clear_dirty(RAMState *rs,
1302 RAMBlock *rb,
1303 unsigned long page)
1305 bool ret;
1307 ret = test_and_clear_bit(page, rb->bmap);
1309 if (ret) {
1310 rs->migration_dirty_pages--;
1312 return ret;
1315 static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb,
1316 ram_addr_t start, ram_addr_t length)
1318 rs->migration_dirty_pages +=
1319 cpu_physical_memory_sync_dirty_bitmap(rb, start, length,
1320 &rs->num_dirty_pages_period);
1324 * ram_pagesize_summary: calculate all the pagesizes of a VM
1326 * Returns a summary bitmap of the page sizes of all RAMBlocks
1328 * For VMs with just normal pages this is equivalent to the host page
1329 * size. If it's got some huge pages then it's the OR of all the
1330 * different page sizes.
1332 uint64_t ram_pagesize_summary(void)
1334 RAMBlock *block;
1335 uint64_t summary = 0;
1337 RAMBLOCK_FOREACH_MIGRATABLE(block) {
1338 summary |= block->page_size;
1341 return summary;
1344 static void migration_update_rates(RAMState *rs, int64_t end_time)
1346 uint64_t iter_count = rs->iterations - rs->iterations_prev;
1348 /* calculate period counters */
1349 ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000
1350 / (end_time - rs->time_last_bitmap_sync);
1352 if (!iter_count) {
1353 return;
1356 if (migrate_use_xbzrle()) {
1357 xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
1358 rs->xbzrle_cache_miss_prev) / iter_count;
1359 rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
1363 static void migration_bitmap_sync(RAMState *rs)
1365 RAMBlock *block;
1366 int64_t end_time;
1367 uint64_t bytes_xfer_now;
1369 ram_counters.dirty_sync_count++;
1371 if (!rs->time_last_bitmap_sync) {
1372 rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1375 trace_migration_bitmap_sync_start();
1376 memory_global_dirty_log_sync();
1378 qemu_mutex_lock(&rs->bitmap_mutex);
1379 rcu_read_lock();
1380 RAMBLOCK_FOREACH_MIGRATABLE(block) {
1381 migration_bitmap_sync_range(rs, block, 0, block->used_length);
1383 ram_counters.remaining = ram_bytes_remaining();
1384 rcu_read_unlock();
1385 qemu_mutex_unlock(&rs->bitmap_mutex);
1387 trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);
1389 end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1391 /* more than 1 second = 1000 milliseconds */
1392 if (end_time > rs->time_last_bitmap_sync + 1000) {
1393 bytes_xfer_now = ram_counters.transferred;
1395 /* During block migration the auto-converge logic incorrectly detects
1396 * that ram migration makes no progress. Avoid this by disabling the
1397 * throttling logic during the bulk phase of block migration. */
1398 if (migrate_auto_converge() && !blk_mig_bulk_active()) {
1399 /* The following detection logic can be refined later. For now:
1400 Check to see if the dirtied bytes exceed 50% of the approx.
1401 amount of bytes that just got transferred since the last time we
1402 were in this routine. If that happens twice, start or increase
1403 throttling */
1405 if ((rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
1406 (bytes_xfer_now - rs->bytes_xfer_prev) / 2) &&
1407 (++rs->dirty_rate_high_cnt >= 2)) {
1408 trace_migration_throttle();
1409 rs->dirty_rate_high_cnt = 0;
1410 mig_throttle_guest_down();
1414 migration_update_rates(rs, end_time);
1416 rs->iterations_prev = rs->iterations;
1418 /* reset period counters */
1419 rs->time_last_bitmap_sync = end_time;
1420 rs->num_dirty_pages_period = 0;
1421 rs->bytes_xfer_prev = bytes_xfer_now;
1423 if (migrate_use_events()) {
1424 qapi_event_send_migration_pass(ram_counters.dirty_sync_count, NULL);
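/*
 * In the sync above, auto-converge triggers when, roughly,
 *
 *     num_dirty_pages_period * TARGET_PAGE_SIZE >
 *         (bytes transferred this period) / 2
 *
 * i.e. the guest dirtied more than half as many bytes as were sent, and
 * this has happened twice (dirty_rate_high_cnt >= 2).
 */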
1429 * save_zero_page: send the zero page to the stream
1431 * Returns the number of pages written.
1433 * @rs: current RAM state
1434 * @block: block that contains the page we want to send
1435 * @offset: offset inside the block for the page
1437 static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
1439 uint8_t *p = block->host + offset;
1440 int pages = -1;
1442 if (is_zero_range(p, TARGET_PAGE_SIZE)) {
1443 ram_counters.duplicate++;
1444 ram_counters.transferred +=
1445 save_page_header(rs, rs->f, block, offset | RAM_SAVE_FLAG_ZERO);
1446 qemu_put_byte(rs->f, 0);
1447 ram_counters.transferred += 1;
1448 pages = 1;
1451 return pages;
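/*
 * A zero page therefore costs only the page header plus the single 0
 * byte written above, instead of a full TARGET_PAGE_SIZE of payload.
 */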
1454 static void ram_release_pages(const char *rbname, uint64_t offset, int pages)
1456 if (!migrate_release_ram() || !migration_in_postcopy()) {
1457 return;
1460 ram_discard_range(rbname, offset, pages << TARGET_PAGE_BITS);
1464 * @pages: the number of pages written by the control path,
1465 * < 0 - error
1466 * > 0 - number of pages written
1468 * Return true if the page has been saved, otherwise false is returned.
1470 static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
1471 int *pages)
1473 uint64_t bytes_xmit = 0;
1474 int ret;
1476 *pages = -1;
1477 ret = ram_control_save_page(rs->f, block->offset, offset, TARGET_PAGE_SIZE,
1478 &bytes_xmit);
1479 if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
1480 return false;
1483 if (bytes_xmit) {
1484 ram_counters.transferred += bytes_xmit;
1485 *pages = 1;
1488 if (ret == RAM_SAVE_CONTROL_DELAYED) {
1489 return true;
1492 if (bytes_xmit > 0) {
1493 ram_counters.normal++;
1494 } else if (bytes_xmit == 0) {
1495 ram_counters.duplicate++;
1498 return true;
1502 * directly send the page to the stream
1504 * Returns the number of pages written.
1506 * @rs: current RAM state
1507 * @block: block that contains the page we want to send
1508 * @offset: offset inside the block for the page
1509 * @buf: the page to be sent
1510 * @async: send the page asynchronously
1512 static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
1513 uint8_t *buf, bool async)
1515 ram_counters.transferred += save_page_header(rs, rs->f, block,
1516 offset | RAM_SAVE_FLAG_PAGE);
1517 if (async) {
1518 qemu_put_buffer_async(rs->f, buf, TARGET_PAGE_SIZE,
1519 migrate_release_ram() &
1520 migration_in_postcopy());
1521 } else {
1522 qemu_put_buffer(rs->f, buf, TARGET_PAGE_SIZE);
1524 ram_counters.transferred += TARGET_PAGE_SIZE;
1525 ram_counters.normal++;
1526 return 1;
1530 * ram_save_page: send the given page to the stream
1532 * Returns the number of pages written.
1533 * < 0 - error
1534 * >=0 - Number of pages written - this might legally be 0
1535 * if xbzrle noticed the page was the same.
1537 * @rs: current RAM state
1538 * @block: block that contains the page we want to send
1539 * @offset: offset inside the block for the page
1540 * @last_stage: if we are at the completion stage
1542 static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
1544 int pages = -1;
1545 uint8_t *p;
1546 bool send_async = true;
1547 RAMBlock *block = pss->block;
1548 ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
1549 ram_addr_t current_addr = block->offset + offset;
1551 p = block->host + offset;
1552 trace_ram_save_page(block->idstr, (uint64_t)offset, p);
1554 XBZRLE_cache_lock();
1555 if (!rs->ram_bulk_stage && !migration_in_postcopy() &&
1556 migrate_use_xbzrle()) {
1557 pages = save_xbzrle_page(rs, &p, current_addr, block,
1558 offset, last_stage);
1559 if (!last_stage) {
1560 /* Can't send this cached data async, since the cache page
1561 * might get updated before it gets to the wire
1563 send_async = false;
1567 /* XBZRLE overflow or normal page */
1568 if (pages == -1) {
1569 pages = save_normal_page(rs, block, offset, p, send_async);
1572 XBZRLE_cache_unlock();
1574 return pages;
1577 static int do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
1578 ram_addr_t offset, uint8_t *source_buf)
1580 RAMState *rs = ram_state;
1581 int bytes_sent, blen;
1582 uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);
1584 bytes_sent = save_page_header(rs, f, block, offset |
1585 RAM_SAVE_FLAG_COMPRESS_PAGE);
1588 * copy it to an internal buffer to avoid it being modified by the VM,
1589 * so that we can catch any error during compression and
1590 * decompression
1592 memcpy(source_buf, p, TARGET_PAGE_SIZE);
1593 blen = qemu_put_compression_data(f, stream, source_buf, TARGET_PAGE_SIZE);
1594 if (blen < 0) {
1595 bytes_sent = 0;
1596 qemu_file_set_error(migrate_get_current()->to_dst_file, blen);
1597 error_report("compressed data failed!");
1598 } else {
1599 bytes_sent += blen;
1600 ram_release_pages(block->idstr, offset & TARGET_PAGE_MASK, 1);
1603 return bytes_sent;
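/*
 * A compressed page record is the usual page header (with
 * RAM_SAVE_FLAG_COMPRESS_PAGE set) followed by the zlib stream written
 * by qemu_put_compression_data(). It is staged in the per-thread dummy
 * QEMUFile and later copied into rs->f by flush_compressed_data() or
 * compress_page_with_multi_thread().
 */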
1606 static void flush_compressed_data(RAMState *rs)
1608 int idx, len, thread_count;
1610 if (!migrate_use_compression()) {
1611 return;
1613 thread_count = migrate_compress_threads();
1615 qemu_mutex_lock(&comp_done_lock);
1616 for (idx = 0; idx < thread_count; idx++) {
1617 while (!comp_param[idx].done) {
1618 qemu_cond_wait(&comp_done_cond, &comp_done_lock);
1621 qemu_mutex_unlock(&comp_done_lock);
1623 for (idx = 0; idx < thread_count; idx++) {
1624 qemu_mutex_lock(&comp_param[idx].mutex);
1625 if (!comp_param[idx].quit) {
1626 len = qemu_put_qemu_file(rs->f, comp_param[idx].file);
1627 ram_counters.transferred += len;
1629 qemu_mutex_unlock(&comp_param[idx].mutex);
1633 static inline void set_compress_params(CompressParam *param, RAMBlock *block,
1634 ram_addr_t offset)
1636 param->block = block;
1637 param->offset = offset;
1640 static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
1641 ram_addr_t offset)
1643 int idx, thread_count, bytes_xmit = -1, pages = -1;
1645 thread_count = migrate_compress_threads();
1646 qemu_mutex_lock(&comp_done_lock);
1647 while (true) {
1648 for (idx = 0; idx < thread_count; idx++) {
1649 if (comp_param[idx].done) {
1650 comp_param[idx].done = false;
1651 bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
1652 qemu_mutex_lock(&comp_param[idx].mutex);
1653 set_compress_params(&comp_param[idx], block, offset);
1654 qemu_cond_signal(&comp_param[idx].cond);
1655 qemu_mutex_unlock(&comp_param[idx].mutex);
1656 pages = 1;
1657 ram_counters.normal++;
1658 ram_counters.transferred += bytes_xmit;
1659 break;
1662 if (pages > 0) {
1663 break;
1664 } else {
1665 qemu_cond_wait(&comp_done_cond, &comp_done_lock);
1668 qemu_mutex_unlock(&comp_done_lock);
1670 return pages;
1674 * find_dirty_block: find the next dirty page and update any state
1675 * associated with the search process.
1677 * Returns true if a page is found
1679 * @rs: current RAM state
1680 * @pss: data about the state of the current dirty page scan
1681 * @again: set to false if the search has scanned the whole of RAM
1683 static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
1685 pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
1686 if (pss->complete_round && pss->block == rs->last_seen_block &&
1687 pss->page >= rs->last_page) {
1689 * We've been once around the RAM and haven't found anything.
1690 * Give up.
1692 *again = false;
1693 return false;
1695 if ((pss->page << TARGET_PAGE_BITS) >= pss->block->used_length) {
1696 /* Didn't find anything in this RAM Block */
1697 pss->page = 0;
1698 pss->block = QLIST_NEXT_RCU(pss->block, next);
1699 if (!pss->block) {
1700 /* Hit the end of the list */
1701 pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
1702 /* Flag that we've looped */
1703 pss->complete_round = true;
1704 rs->ram_bulk_stage = false;
1705 if (migrate_use_xbzrle()) {
1706 /* If xbzrle is on, stop using the data compression at this
1707 * point. In theory, xbzrle can do better than compression.
1709 flush_compressed_data(rs);
1712 /* Didn't find anything this time, but try again on the new block */
1713 *again = true;
1714 return false;
1715 } else {
1716 /* Can go around again, but... */
1717 *again = true;
1718 /* We've found something so probably don't need to */
1719 return true;
1724 * unqueue_page: gets a page off the queue
1726 * Helper for 'get_queued_page' - gets a page off the queue
1728 * Returns the block of the page (or NULL if none available)
1730 * @rs: current RAM state
1731 * @offset: used to return the offset within the RAMBlock
1733 static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
1735 RAMBlock *block = NULL;
1737 qemu_mutex_lock(&rs->src_page_req_mutex);
1738 if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
1739 struct RAMSrcPageRequest *entry =
1740 QSIMPLEQ_FIRST(&rs->src_page_requests);
1741 block = entry->rb;
1742 *offset = entry->offset;
1744 if (entry->len > TARGET_PAGE_SIZE) {
1745 entry->len -= TARGET_PAGE_SIZE;
1746 entry->offset += TARGET_PAGE_SIZE;
1747 } else {
1748 memory_region_unref(block->mr);
1749 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
1750 g_free(entry);
1751 migration_consume_urgent_request();
1754 qemu_mutex_unlock(&rs->src_page_req_mutex);
1756 return block;
1760 * get_queued_page: unqueue a page from the postcopy requests
1762 * Skips pages that are already sent (!dirty)
1764 * Returns true if a queued page is found
1766 * @rs: current RAM state
1767 * @pss: data about the state of the current dirty page scan
1769 static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
1771 RAMBlock *block;
1772 ram_addr_t offset;
1773 bool dirty;
1775 do {
1776 block = unqueue_page(rs, &offset);
1778 * We're sending this page, and since it's postcopy nothing else
1779 * will dirty it, and we must make sure it doesn't get sent again
1780 * even if this queue request was received after the background
1781 * search already sent it.
1783 if (block) {
1784 unsigned long page;
1786 page = offset >> TARGET_PAGE_BITS;
1787 dirty = test_bit(page, block->bmap);
1788 if (!dirty) {
1789 trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
1790 page, test_bit(page, block->unsentmap));
1791 } else {
1792 trace_get_queued_page(block->idstr, (uint64_t)offset, page);
1796 } while (block && !dirty);
1798 if (block) {
1800 * As soon as we start servicing pages out of order, we have
1801 * to kill the bulk stage, since the bulk stage assumes
1802 * (in migration_bitmap_find_and_reset_dirty) that every page is
1803 * dirty, and that's no longer true.
1805 rs->ram_bulk_stage = false;
1808 * We want the background search to continue from the queued page
1809 * since the guest is likely to want other pages near to the page
1810 * it just requested.
1812 pss->block = block;
1813 pss->page = offset >> TARGET_PAGE_BITS;
1816 return !!block;
1820 * migration_page_queue_free: drop any remaining pages in the ram
1821 * request queue
1823 * It should be empty at the end anyway, but in error cases there may
1824 * be some left. In case there is any page left, we drop it.
1827 static void migration_page_queue_free(RAMState *rs)
1829 struct RAMSrcPageRequest *mspr, *next_mspr;
1830 /* This queue generally should be empty - but in the case of a failed
1831 * migration might have some droppings in.
1833 rcu_read_lock();
1834 QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
1835 memory_region_unref(mspr->rb->mr);
1836 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
1837 g_free(mspr);
1839 rcu_read_unlock();
1843 * ram_save_queue_pages: queue the page for transmission
1845 * A request from postcopy destination for example.
1847 * Returns zero on success or negative on error
1849 * @rbname: Name of the RAMBlock of the request. NULL means the
1850 * same as the last one.
1851 * @start: starting address from the start of the RAMBlock
1852 * @len: length (in bytes) to send
1854 int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
1856 RAMBlock *ramblock;
1857 RAMState *rs = ram_state;
1859 ram_counters.postcopy_requests++;
1860 rcu_read_lock();
1861 if (!rbname) {
1862 /* Reuse last RAMBlock */
1863 ramblock = rs->last_req_rb;
1865 if (!ramblock) {
1867 * Shouldn't happen, we can't reuse the last RAMBlock if
1868 * it's the 1st request.
1870 error_report("ram_save_queue_pages no previous block");
1871 goto err;
1873 } else {
1874 ramblock = qemu_ram_block_by_name(rbname);
1876 if (!ramblock) {
1877 /* We shouldn't be asked for a non-existent RAMBlock */
1878 error_report("ram_save_queue_pages no block '%s'", rbname);
1879 goto err;
1881 rs->last_req_rb = ramblock;
1883 trace_ram_save_queue_pages(ramblock->idstr, start, len);
1884 if (start+len > ramblock->used_length) {
1885 error_report("%s request overrun start=" RAM_ADDR_FMT " len="
1886 RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
1887 __func__, start, len, ramblock->used_length);
1888 goto err;
1891 struct RAMSrcPageRequest *new_entry =
1892 g_malloc0(sizeof(struct RAMSrcPageRequest));
1893 new_entry->rb = ramblock;
1894 new_entry->offset = start;
1895 new_entry->len = len;
1897 memory_region_ref(ramblock->mr);
1898 qemu_mutex_lock(&rs->src_page_req_mutex);
1899 QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
1900 migration_make_urgent_request();
1901 qemu_mutex_unlock(&rs->src_page_req_mutex);
1902 rcu_read_unlock();
1904 return 0;
1906 err:
1907 rcu_read_unlock();
1908 return -1;
1911 static bool save_page_use_compression(RAMState *rs)
1913 if (!migrate_use_compression()) {
1914 return false;
1918 * If xbzrle is on, stop using the data compression after first
1919 * round of migration even if compression is enabled. In theory,
1920 * xbzrle can do better than compression.
1922 if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
1923 return true;
1926 return false;
1930 * ram_save_target_page: save one target page
1932 * Returns the number of pages written
1934 * @rs: current RAM state
1935 * @pss: data about the page we want to send
1936 * @last_stage: if we are at the completion stage
1938 static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
1939 bool last_stage)
1941 RAMBlock *block = pss->block;
1942 ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
1943 int res;
1945 if (control_save_page(rs, block, offset, &res)) {
1946 return res;
1950 * When starting the process of a new block, the first page of
1951 * the block should be sent out before other pages in the same
1952 * block, and all the pages in the last block should have been sent
1953 * out. Keeping this order is important, because the 'cont' flag
1954 * is used to avoid resending the block name.
1956 if (block != rs->last_sent_block && save_page_use_compression(rs)) {
1957 flush_compressed_data(rs);
1960 res = save_zero_page(rs, block, offset);
1961 if (res > 0) {
1962 /* Must let xbzrle know, otherwise a previous (now 0'd) cached
1963 * page would be stale
1965 if (!save_page_use_compression(rs)) {
1966 XBZRLE_cache_lock();
1967 xbzrle_cache_zero_page(rs, block->offset + offset);
1968 XBZRLE_cache_unlock();
1970 ram_release_pages(block->idstr, offset, res);
1971 return res;
1975 * Make sure the first page is sent out before other pages.
1977 * We post it as a normal page, as compression would take a lot of
1978 * CPU resources.
1980 if (block == rs->last_sent_block && save_page_use_compression(rs)) {
1981 return compress_page_with_multi_thread(rs, block, offset);
1984 return ram_save_page(rs, pss, last_stage);
1988 * ram_save_host_page: save a whole host page
1990 * Starting at *offset send pages up to the end of the current host
1991 * page. It's valid for the initial offset to point into the middle of
1992 * a host page in which case the remainder of the host page is sent.
1993 * Only dirty target pages are sent. Note that the host page size may
1994 * be a huge page for this block.
1995 * The saving stops at the boundary of the used_length of the block
1996 * if the RAMBlock isn't a multiple of the host page size.
1998 * Returns the number of pages written or negative on error
2000 * @rs: current RAM state
2001 * @ms: current migration state
2002 * @pss: data about the page we want to send
2003 * @last_stage: if we are at the completion stage
2005 static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
2006 bool last_stage)
2008 int tmppages, pages = 0;
2009 size_t pagesize_bits =
2010 qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
2012 if (!qemu_ram_is_migratable(pss->block)) {
2013 error_report("block %s should not be migrated !", pss->block->idstr);
2014 return 0;
2017 do {
2018 /* Check if the page is dirty and, if it is, send it */
2019 if (!migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
2020 pss->page++;
2021 continue;
2024 tmppages = ram_save_target_page(rs, pss, last_stage);
2025 if (tmppages < 0) {
2026 return tmppages;
2029 pages += tmppages;
2030 if (pss->block->unsentmap) {
2031 clear_bit(pss->page, pss->block->unsentmap);
2034 pss->page++;
2035 } while ((pss->page & (pagesize_bits - 1)) &&
2036 offset_in_ramblock(pss->block, pss->page << TARGET_PAGE_BITS));
2038 /* The offset we leave with is the last one we looked at */
2039 pss->page--;
2040 return pages;
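/*
 * For example, with 2MB host huge pages and 4KB target pages,
 * pagesize_bits is 512, so the loop above visits up to 512 target pages
 * before pss->page reaches the next host-page boundary.
 */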
2044 * ram_find_and_save_block: finds a dirty page and sends it to f
2046 * Called within an RCU critical section.
2048 * Returns the number of pages written where zero means no dirty pages
2050 * @rs: current RAM state
2051 * @last_stage: if we are at the completion stage
2053 * On systems where host-page-size > target-page-size it will send all the
2054 * pages in a host page that are dirty.
2057 static int ram_find_and_save_block(RAMState *rs, bool last_stage)
2059 PageSearchStatus pss;
2060 int pages = 0;
2061 bool again, found;
2063 /* No dirty page as there is zero RAM */
2064 if (!ram_bytes_total()) {
2065 return pages;
2068 pss.block = rs->last_seen_block;
2069 pss.page = rs->last_page;
2070 pss.complete_round = false;
2072 if (!pss.block) {
2073 pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
2076 do {
2077 again = true;
2078 found = get_queued_page(rs, &pss);
2080 if (!found) {
2081 /* priority queue empty, so just search for something dirty */
2082 found = find_dirty_block(rs, &pss, &again);
2085 if (found) {
2086 pages = ram_save_host_page(rs, &pss, last_stage);
2088 } while (!pages && again);
2090 rs->last_seen_block = pss.block;
2091 rs->last_page = pss.page;
2093 return pages;
2096 void acct_update_position(QEMUFile *f, size_t size, bool zero)
2098 uint64_t pages = size / TARGET_PAGE_SIZE;
2100 if (zero) {
2101 ram_counters.duplicate += pages;
2102 } else {
2103 ram_counters.normal += pages;
2104 ram_counters.transferred += size;
2105 qemu_update_position(f, size);
2109 uint64_t ram_bytes_total(void)
2111 RAMBlock *block;
2112 uint64_t total = 0;
2114 rcu_read_lock();
2115 RAMBLOCK_FOREACH_MIGRATABLE(block) {
2116 total += block->used_length;
2118 rcu_read_unlock();
2119 return total;
2122 static void xbzrle_load_setup(void)
2124 XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
2127 static void xbzrle_load_cleanup(void)
2129 g_free(XBZRLE.decoded_buf);
2130 XBZRLE.decoded_buf = NULL;
2133 static void ram_state_cleanup(RAMState **rsp)
2135 if (*rsp) {
2136 migration_page_queue_free(*rsp);
2137 qemu_mutex_destroy(&(*rsp)->bitmap_mutex);
2138 qemu_mutex_destroy(&(*rsp)->src_page_req_mutex);
2139 g_free(*rsp);
2140 *rsp = NULL;
2144 static void xbzrle_cleanup(void)
2146 XBZRLE_cache_lock();
2147 if (XBZRLE.cache) {
2148 cache_fini(XBZRLE.cache);
2149 g_free(XBZRLE.encoded_buf);
2150 g_free(XBZRLE.current_buf);
2151 g_free(XBZRLE.zero_target_page);
2152 XBZRLE.cache = NULL;
2153 XBZRLE.encoded_buf = NULL;
2154 XBZRLE.current_buf = NULL;
2155 XBZRLE.zero_target_page = NULL;
2157 XBZRLE_cache_unlock();
2160 static void ram_save_cleanup(void *opaque)
2162 RAMState **rsp = opaque;
2163 RAMBlock *block;
2165 /* the caller holds the iothread lock or is in a bh, so there is
2166 * no writing race against this migration_bitmap
2168 memory_global_dirty_log_stop();
2170 RAMBLOCK_FOREACH_MIGRATABLE(block) {
2171 g_free(block->bmap);
2172 block->bmap = NULL;
2173 g_free(block->unsentmap);
2174 block->unsentmap = NULL;
2177 xbzrle_cleanup();
2178 compress_threads_save_cleanup();
2179 ram_state_cleanup(rsp);
2182 static void ram_state_reset(RAMState *rs)
2184 rs->last_seen_block = NULL;
2185 rs->last_sent_block = NULL;
2186 rs->last_page = 0;
2187 rs->last_version = ram_list.version;
2188 rs->ram_bulk_stage = true;
2191 #define MAX_WAIT 50 /* ms, half buffered_file limit */
2194 * 'expected' is the value you expect the bitmap to be mostly full
2195 * of; lines that contain only this value are not printed.
2196 * 'todump' is the bitmap to dump.
2198 void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
2199 unsigned long pages)
2201 int64_t cur;
2202 int64_t linelen = 128;
2203 char linebuf[129];
2205 for (cur = 0; cur < pages; cur += linelen) {
2206 int64_t curb;
2207 bool found = false;
2209 * Last line; catch the case where the line length
2210 * is longer than remaining ram
2212 if (cur + linelen > pages) {
2213 linelen = pages - cur;
2215 for (curb = 0; curb < linelen; curb++) {
2216 bool thisbit = test_bit(cur + curb, todump);
2217 linebuf[curb] = thisbit ? '1' : '.';
2218 found = found || (thisbit != expected);
2220 if (found) {
2221 linebuf[curb] = '\0';
2222 fprintf(stderr, "0x%08" PRIx64 " : %s\n", cur, linebuf);
2227 /* **** functions for postcopy ***** */
2229 void ram_postcopy_migrated_memory_release(MigrationState *ms)
2231 struct RAMBlock *block;
2233 RAMBLOCK_FOREACH_MIGRATABLE(block) {
2234 unsigned long *bitmap = block->bmap;
2235 unsigned long range = block->used_length >> TARGET_PAGE_BITS;
2236 unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
2238 while (run_start < range) {
2239 unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
2240 ram_discard_range(block->idstr, run_start << TARGET_PAGE_BITS,
2241 (run_end - run_start) << TARGET_PAGE_BITS);
2242 run_start = find_next_zero_bit(bitmap, range, run_end + 1);
2248 * postcopy_send_discard_bm_ram: discard a RAMBlock
2250 * Returns zero on success
2252 * Callback from postcopy_each_ram_send_discard for each RAMBlock
2253 * Note: At this point the 'unsentmap' is the processed bitmap combined
2254 * with the dirtymap; so a '1' means it's either dirty or unsent.
2256 * @ms: current migration state
2257 * @pds: state for postcopy
2258 * @block: RAMBlock to discard
2261 static int postcopy_send_discard_bm_ram(MigrationState *ms,
2262 PostcopyDiscardState *pds,
2263 RAMBlock *block)
2265 unsigned long end = block->used_length >> TARGET_PAGE_BITS;
2266 unsigned long current;
2267 unsigned long *unsentmap = block->unsentmap;
2269 for (current = 0; current < end; ) {
2270 unsigned long one = find_next_bit(unsentmap, end, current);
2272 if (one <= end) {
2273 unsigned long zero = find_next_zero_bit(unsentmap, end, one + 1);
2274 unsigned long discard_length;
2276 if (zero >= end) {
2277 discard_length = end - one;
2278 } else {
2279 discard_length = zero - one;
2281 if (discard_length) {
2282 postcopy_discard_send_range(ms, pds, one, discard_length);
2284 current = one + discard_length;
2285 } else {
2286 current = one;
2290 return 0;
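/*
 * Illustrative sketch (editor's addition): postcopy_send_discard_bm_ram()
 * above walks the unsentmap as a sequence of runs of set bits.  This
 * hypothetical helper shows how one run is delimited with find_next_bit()
 * and find_next_zero_bit().
 */
static inline unsigned long sketch_next_set_run(const unsigned long *map,
                                                unsigned long nbits,
                                                unsigned long start,
                                                unsigned long *run_len)
{
    unsigned long one = find_next_bit(map, nbits, start);
    unsigned long zero;

    if (one >= nbits) {
        *run_len = 0;
        return nbits;               /* no more set bits */
    }
    zero = find_next_zero_bit(map, nbits, one + 1);
    *run_len = (zero >= nbits ? nbits : zero) - one;
    return one;                     /* index of the first bit of the run */
}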
2294 * postcopy_each_ram_send_discard: discard all RAMBlocks
2296 * Returns 0 for success or negative for error
2298 * Utility for the outgoing postcopy code.
2299 * Calls postcopy_send_discard_bm_ram for each RAMBlock
2300 * passing it bitmap indexes and name.
2301 * (qemu_ram_foreach_block ends up passing unscaled lengths,
2302 * which would mean the postcopy code would have to deal with target pages)
2304 * @ms: current migration state
2306 static int postcopy_each_ram_send_discard(MigrationState *ms)
2308 struct RAMBlock *block;
2309 int ret;
2311 RAMBLOCK_FOREACH_MIGRATABLE(block) {
2312 PostcopyDiscardState *pds =
2313 postcopy_discard_send_init(ms, block->idstr);
2316 * Postcopy sends chunks of bitmap over the wire, but it
2317 * just needs indexes at this point, avoids it having
2318 * target page specific code.
2320 ret = postcopy_send_discard_bm_ram(ms, pds, block);
2321 postcopy_discard_send_finish(ms, pds);
2322 if (ret) {
2323 return ret;
2327 return 0;
2331 * postcopy_chunk_hostpages_pass: canonicalize bitmap in host pages
2333 * Helper for postcopy_chunk_hostpages; it's called twice to
2334 * canonicalize the two bitmaps, that are similar, but one is
2335 * inverted.
2337 * Postcopy requires that all target pages in a hostpage are dirty or
2338 * clean, not a mix. This function canonicalizes the bitmaps.
2340 * @ms: current migration state
2341 * @unsent_pass: if true we need to canonicalize partially unsent host pages
2342 * otherwise we need to canonicalize partially dirty host pages
2343 * @block: block that contains the page we want to canonicalize
2344 * @pds: state for postcopy
2346 static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
2347 RAMBlock *block,
2348 PostcopyDiscardState *pds)
2350 RAMState *rs = ram_state;
2351 unsigned long *bitmap = block->bmap;
2352 unsigned long *unsentmap = block->unsentmap;
2353 unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
2354 unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
2355 unsigned long run_start;
2357 if (block->page_size == TARGET_PAGE_SIZE) {
2358 /* Easy case - TPS==HPS for a non-huge page RAMBlock */
2359 return;
2362 if (unsent_pass) {
2363 /* Find a sent page */
2364 run_start = find_next_zero_bit(unsentmap, pages, 0);
2365 } else {
2366 /* Find a dirty page */
2367 run_start = find_next_bit(bitmap, pages, 0);
2370 while (run_start < pages) {
2371 bool do_fixup = false;
2372 unsigned long fixup_start_addr;
2373 unsigned long host_offset;
2376 * If the start of this run of pages is in the middle of a host
2377 * page, then we need to fixup this host page.
2379 host_offset = run_start % host_ratio;
2380 if (host_offset) {
2381 do_fixup = true;
2382 run_start -= host_offset;
2383 fixup_start_addr = run_start;
2384 /* For the next pass */
2385 run_start = run_start + host_ratio;
2386 } else {
2387 /* Find the end of this run */
2388 unsigned long run_end;
2389 if (unsent_pass) {
2390 run_end = find_next_bit(unsentmap, pages, run_start + 1);
2391 } else {
2392 run_end = find_next_zero_bit(bitmap, pages, run_start + 1);
2395 * If the end isn't at the start of a host page, then the
2396 * run doesn't finish at the end of a host page
2397 * and we need to discard.
2399 host_offset = run_end % host_ratio;
2400 if (host_offset) {
2401 do_fixup = true;
2402 fixup_start_addr = run_end - host_offset;
2404 * This host page has gone, the next loop iteration starts
2405 * from after the fixup
2407 run_start = fixup_start_addr + host_ratio;
2408 } else {
2410 * No discards on this iteration, next loop starts from
2411 * next sent/dirty page
2413 run_start = run_end + 1;
2417 if (do_fixup) {
2418 unsigned long page;
2420 /* Tell the destination to discard this page */
2421 if (unsent_pass || !test_bit(fixup_start_addr, unsentmap)) {
2422 /* For the unsent_pass we:
2423 * discard partially sent pages
2424 * For the !unsent_pass (dirty) we:
2425 * discard partially dirty pages that were sent
2426 * (any partially sent pages were already discarded
2427 * by the previous unsent_pass)
2429 postcopy_discard_send_range(ms, pds, fixup_start_addr,
2430 host_ratio);
2433 /* Clean up the bitmap */
2434 for (page = fixup_start_addr;
2435 page < fixup_start_addr + host_ratio; page++) {
2436 /* All pages in this host page are now not sent */
2437 set_bit(page, unsentmap);
2440 * Remark them as dirty, updating the count for any pages
2441 * that weren't previously dirty.
2443 rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
2447 if (unsent_pass) {
2448 /* Find the next sent page for the next iteration */
2449 run_start = find_next_zero_bit(unsentmap, pages, run_start);
2450 } else {
2451 /* Find the next dirty page for the next iteration */
2452 run_start = find_next_bit(bitmap, pages, run_start);
2458 * postcopy_chunk_hostpages: discard any partially sent host page
2460 * Utility for the outgoing postcopy code.
2462 * Discard any partially sent host-page size chunks, mark any partially
2463 * dirty host-page size chunks as all dirty. In this case the host-page
2464 * is the host-page for the particular RAMBlock, i.e. it might be a huge page
2466 * Returns zero on success
2468 * @ms: current migration state
2469 * @block: block we want to work with
2471 static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
2473 PostcopyDiscardState *pds =
2474 postcopy_discard_send_init(ms, block->idstr);
2476 /* First pass: Discard all partially sent host pages */
2477 postcopy_chunk_hostpages_pass(ms, true, block, pds);
2479 * Second pass: Ensure that all partially dirty host pages are made
2480 * fully dirty.
2482 postcopy_chunk_hostpages_pass(ms, false, block, pds);
2484 postcopy_discard_send_finish(ms, pds);
2485 return 0;
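/*
 * Illustrative sketch (editor's addition): the two canonicalization passes
 * above work in units of 'host_ratio' target pages per host page.  This
 * hypothetical helper shows the alignment test used to decide whether a
 * run boundary falls in the middle of a host page and thus needs fixing up.
 */
static inline bool sketch_is_host_page_aligned(RAMBlock *rb, unsigned long page)
{
    unsigned long host_ratio = qemu_ram_pagesize(rb) >> TARGET_PAGE_BITS;

    return (page % host_ratio) == 0;
}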
2489 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
2491 * Returns zero on success
2493 * Transmit the set of pages to be discarded after precopy to the target;
2494 * these are pages that:
2495 * a) have been previously transmitted but are now dirty again
2496 * b) have never been transmitted; this ensures that
2497 * any pages on the destination that have been mapped by background
2498 * tasks get discarded (transparent huge pages are the specific concern)
2499 * Hopefully this set is pretty sparse
2501 * @ms: current migration state
2503 int ram_postcopy_send_discard_bitmap(MigrationState *ms)
2505 RAMState *rs = ram_state;
2506 RAMBlock *block;
2507 int ret;
2509 rcu_read_lock();
2511 /* This should be our last sync, the src is now paused */
2512 migration_bitmap_sync(rs);
2514 /* Easiest way to make sure we don't resume in the middle of a host-page */
2515 rs->last_seen_block = NULL;
2516 rs->last_sent_block = NULL;
2517 rs->last_page = 0;
2519 RAMBLOCK_FOREACH_MIGRATABLE(block) {
2520 unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
2521 unsigned long *bitmap = block->bmap;
2522 unsigned long *unsentmap = block->unsentmap;
2524 if (!unsentmap) {
2525 /* We don't have a safe way to resize the sentmap, so
2526 * if the bitmap was resized it will be NULL at this
2527 * point.
2529 error_report("migration ram resized during precopy phase");
2530 rcu_read_unlock();
2531 return -EINVAL;
2533 /* Deal with TPS != HPS and huge pages */
2534 ret = postcopy_chunk_hostpages(ms, block);
2535 if (ret) {
2536 rcu_read_unlock();
2537 return ret;
2541 * Update the unsentmap to be unsentmap = unsentmap | dirty
2543 bitmap_or(unsentmap, unsentmap, bitmap, pages);
2544 #ifdef DEBUG_POSTCOPY
2545 ram_debug_dump_bitmap(unsentmap, true, pages);
2546 #endif
2548 trace_ram_postcopy_send_discard_bitmap();
2550 ret = postcopy_each_ram_send_discard(ms);
2551 rcu_read_unlock();
2553 return ret;
2557 * ram_discard_range: discard dirtied pages at the beginning of postcopy
2559 * Returns zero on success
2561 * @rbname: name of the RAMBlock of the request. NULL means the
2562 * same as the last one.
2563 * @start: byte offset within the RAMBlock
2564 * @length: number of bytes to discard
2566 int ram_discard_range(const char *rbname, uint64_t start, size_t length)
2568 int ret = -1;
2570 trace_ram_discard_range(rbname, start, length);
2572 rcu_read_lock();
2573 RAMBlock *rb = qemu_ram_block_by_name(rbname);
2575 if (!rb) {
2576 error_report("ram_discard_range: Failed to find block '%s'", rbname);
2577 goto err;
2580 bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
2581 length >> qemu_target_page_bits());
2582 ret = ram_block_discard_range(rb, start, length);
2584 err:
2585 rcu_read_unlock();
2587 return ret;
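/*
 * Illustrative usage sketch (editor's addition): discarding one host page
 * of a block at a given byte offset.  The helper name and its callers are
 * hypothetical; offset and length are byte-based, as ram_discard_range()
 * above expects.
 */
static inline int sketch_discard_one_host_page(RAMBlock *rb, uint64_t offset)
{
    return ram_discard_range(rb->idstr, offset, qemu_ram_pagesize(rb));
}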
2591 * For every allocation, we try not to crash the VM if the
2592 * allocation fails.
2594 static int xbzrle_init(void)
2596 Error *local_err = NULL;
2598 if (!migrate_use_xbzrle()) {
2599 return 0;
2602 XBZRLE_cache_lock();
2604 XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE);
2605 if (!XBZRLE.zero_target_page) {
2606 error_report("%s: Error allocating zero page", __func__);
2607 goto err_out;
2610 XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(),
2611 TARGET_PAGE_SIZE, &local_err);
2612 if (!XBZRLE.cache) {
2613 error_report_err(local_err);
2614 goto free_zero_page;
2617 XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
2618 if (!XBZRLE.encoded_buf) {
2619 error_report("%s: Error allocating encoded_buf", __func__);
2620 goto free_cache;
2623 XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
2624 if (!XBZRLE.current_buf) {
2625 error_report("%s: Error allocating current_buf", __func__);
2626 goto free_encoded_buf;
2629 /* We are all good */
2630 XBZRLE_cache_unlock();
2631 return 0;
2633 free_encoded_buf:
2634 g_free(XBZRLE.encoded_buf);
2635 XBZRLE.encoded_buf = NULL;
2636 free_cache:
2637 cache_fini(XBZRLE.cache);
2638 XBZRLE.cache = NULL;
2639 free_zero_page:
2640 g_free(XBZRLE.zero_target_page);
2641 XBZRLE.zero_target_page = NULL;
2642 err_out:
2643 XBZRLE_cache_unlock();
2644 return -ENOMEM;
2647 static int ram_state_init(RAMState **rsp)
2649 *rsp = g_try_new0(RAMState, 1);
2651 if (!*rsp) {
2652 error_report("%s: Init ramstate fail", __func__);
2653 return -1;
2656 qemu_mutex_init(&(*rsp)->bitmap_mutex);
2657 qemu_mutex_init(&(*rsp)->src_page_req_mutex);
2658 QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
2661 * Count the total number of pages used by ram blocks not including any
2662 * gaps due to alignment or unplugs.
2664 (*rsp)->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
2666 ram_state_reset(*rsp);
2668 return 0;
2671 static void ram_list_init_bitmaps(void)
2673 RAMBlock *block;
2674 unsigned long pages;
2676 /* Skip setting bitmap if there is no RAM */
2677 if (ram_bytes_total()) {
2678 RAMBLOCK_FOREACH_MIGRATABLE(block) {
2679 pages = block->max_length >> TARGET_PAGE_BITS;
2680 block->bmap = bitmap_new(pages);
2681 bitmap_set(block->bmap, 0, pages);
2682 if (migrate_postcopy_ram()) {
2683 block->unsentmap = bitmap_new(pages);
2684 bitmap_set(block->unsentmap, 0, pages);
2690 static void ram_init_bitmaps(RAMState *rs)
2692 /* For memory_global_dirty_log_start below. */
2693 qemu_mutex_lock_iothread();
2694 qemu_mutex_lock_ramlist();
2695 rcu_read_lock();
2697 ram_list_init_bitmaps();
2698 memory_global_dirty_log_start();
2699 migration_bitmap_sync(rs);
2701 rcu_read_unlock();
2702 qemu_mutex_unlock_ramlist();
2703 qemu_mutex_unlock_iothread();
2706 static int ram_init_all(RAMState **rsp)
2708 if (ram_state_init(rsp)) {
2709 return -1;
2712 if (xbzrle_init()) {
2713 ram_state_cleanup(rsp);
2714 return -1;
2717 ram_init_bitmaps(*rsp);
2719 return 0;
2722 static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out)
2724 RAMBlock *block;
2725 uint64_t pages = 0;
2728 * Postcopy is not using xbzrle/compression, so no need for that.
2729 * Also, since the source is already halted, we don't need to care
2730 * about dirty page logging either.
2733 RAMBLOCK_FOREACH_MIGRATABLE(block) {
2734 pages += bitmap_count_one(block->bmap,
2735 block->used_length >> TARGET_PAGE_BITS);
2738 /* This may not be aligned with current bitmaps. Recalculate. */
2739 rs->migration_dirty_pages = pages;
2741 rs->last_seen_block = NULL;
2742 rs->last_sent_block = NULL;
2743 rs->last_page = 0;
2744 rs->last_version = ram_list.version;
2746 * Disable the bulk stage, otherwise we'll resend the whole RAM no
2747 * matter what we have sent.
2749 rs->ram_bulk_stage = false;
2751 /* Update RAMState cache of output QEMUFile */
2752 rs->f = out;
2754 trace_ram_state_resume_prepare(pages);
2758 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
2759 * a long-running RCU critical section. When RCU reclaims in the code
2760 * start to become numerous it will be necessary to reduce the
2761 * granularity of these critical sections.
2765 * ram_save_setup: Setup RAM for migration
2767 * Returns zero to indicate success and negative for error
2769 * @f: QEMUFile where to send the data
2770 * @opaque: RAMState pointer
2772 static int ram_save_setup(QEMUFile *f, void *opaque)
2774 RAMState **rsp = opaque;
2775 RAMBlock *block;
2777 if (compress_threads_save_setup()) {
2778 return -1;
2781 /* migration has already setup the bitmap, reuse it. */
2782 if (!migration_in_colo_state()) {
2783 if (ram_init_all(rsp) != 0) {
2784 compress_threads_save_cleanup();
2785 return -1;
2788 (*rsp)->f = f;
2790 rcu_read_lock();
2792 qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
2794 RAMBLOCK_FOREACH_MIGRATABLE(block) {
2795 qemu_put_byte(f, strlen(block->idstr));
2796 qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
2797 qemu_put_be64(f, block->used_length);
2798 if (migrate_postcopy_ram() && block->page_size != qemu_host_page_size) {
2799 qemu_put_be64(f, block->page_size);
2803 rcu_read_unlock();
2805 ram_control_before_iterate(f, RAM_CONTROL_SETUP);
2806 ram_control_after_iterate(f, RAM_CONTROL_SETUP);
2808 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
2810 return 0;
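/*
 * Illustrative sketch (editor's addition): each 64-bit record header in the
 * stream written above packs RAM_SAVE_FLAG_* bits into the sub-page bits of
 * a target-page-aligned value (a page offset, or the total RAM size for the
 * MEM_SIZE record).  These hypothetical helpers mirror how the load side
 * splits the two parts again.
 */
static inline uint64_t sketch_pack_header(uint64_t page_aligned_value, int flags)
{
    return page_aligned_value | flags;
}

static inline uint64_t sketch_unpack_header(uint64_t header, int *flags)
{
    *flags = header & ~TARGET_PAGE_MASK;    /* low bits: RAM_SAVE_FLAG_* */
    return header & TARGET_PAGE_MASK;       /* remaining bits: offset/size */
}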
2814 * ram_save_iterate: iterative stage for migration
2816 * Returns zero to indicate success and negative for error
2818 * @f: QEMUFile where to send the data
2819 * @opaque: RAMState pointer
2821 static int ram_save_iterate(QEMUFile *f, void *opaque)
2823 RAMState **temp = opaque;
2824 RAMState *rs = *temp;
2825 int ret;
2826 int i;
2827 int64_t t0;
2828 int done = 0;
2830 if (blk_mig_bulk_active()) {
2831 /* Avoid transferring ram during bulk phase of block migration as
2832 * the bulk phase will usually take a long time and transferring
2833 * ram updates during that time is pointless. */
2834 goto out;
2837 rcu_read_lock();
2838 if (ram_list.version != rs->last_version) {
2839 ram_state_reset(rs);
2842 /* Read version before ram_list.blocks */
2843 smp_rmb();
2845 ram_control_before_iterate(f, RAM_CONTROL_ROUND);
2847 t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2848 i = 0;
2849 while ((ret = qemu_file_rate_limit(f)) == 0 ||
2850 !QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
2851 int pages;
2853 if (qemu_file_get_error(f)) {
2854 break;
2857 pages = ram_find_and_save_block(rs, false);
2858 /* no more pages to send */
2859 if (pages == 0) {
2860 done = 1;
2861 break;
2863 rs->iterations++;
2865 /* we want to check in the first loop, just in case it was the first time
2866 and we had to sync the dirty bitmap.
2867 qemu_clock_get_ns() is a bit expensive, so we only check every few
2868 iterations
2870 if ((i & 63) == 0) {
2871 uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
2872 if (t1 > MAX_WAIT) {
2873 trace_ram_save_iterate_big_wait(t1, i);
2874 break;
2877 i++;
2879 flush_compressed_data(rs);
2880 rcu_read_unlock();
2883 * Must occur before EOS (or any QEMUFile operation)
2884 * because of RDMA protocol.
2886 ram_control_after_iterate(f, RAM_CONTROL_ROUND);
2888 out:
2889 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
2890 ram_counters.transferred += 8;
2892 ret = qemu_file_get_error(f);
2893 if (ret < 0) {
2894 return ret;
2897 return done;
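/*
 * Illustrative sketch (editor's addition): the time check in
 * ram_save_iterate() above only samples the clock every 64 iterations and
 * compares the elapsed milliseconds against MAX_WAIT.  The helper name is
 * hypothetical.
 */
static inline bool sketch_iteration_over_budget(int64_t t0_ns, int i)
{
    if ((i & 63) != 0) {
        return false;       /* only look at the clock every 64 iterations */
    }
    return (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0_ns) / 1000000 > MAX_WAIT;
}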
2901 * ram_save_complete: function called to send the remaining amount of ram
2903 * Returns zero to indicate success
2905 * Called with iothread lock
2907 * @f: QEMUFile where to send the data
2908 * @opaque: RAMState pointer
2910 static int ram_save_complete(QEMUFile *f, void *opaque)
2912 RAMState **temp = opaque;
2913 RAMState *rs = *temp;
2915 rcu_read_lock();
2917 if (!migration_in_postcopy()) {
2918 migration_bitmap_sync(rs);
2921 ram_control_before_iterate(f, RAM_CONTROL_FINISH);
2923 /* try transferring iterative blocks of memory */
2925 /* flush all remaining blocks regardless of rate limiting */
2926 while (true) {
2927 int pages;
2929 pages = ram_find_and_save_block(rs, !migration_in_colo_state());
2930 /* no more blocks to send */
2931 if (pages == 0) {
2932 break;
2936 flush_compressed_data(rs);
2937 ram_control_after_iterate(f, RAM_CONTROL_FINISH);
2939 rcu_read_unlock();
2941 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
2943 return 0;
2946 static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
2947 uint64_t *res_precopy_only,
2948 uint64_t *res_compatible,
2949 uint64_t *res_postcopy_only)
2951 RAMState **temp = opaque;
2952 RAMState *rs = *temp;
2953 uint64_t remaining_size;
2955 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
2957 if (!migration_in_postcopy() &&
2958 remaining_size < max_size) {
2959 qemu_mutex_lock_iothread();
2960 rcu_read_lock();
2961 migration_bitmap_sync(rs);
2962 rcu_read_unlock();
2963 qemu_mutex_unlock_iothread();
2964 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
2967 if (migrate_postcopy_ram()) {
2968 /* We can do postcopy, and all the data is postcopiable */
2969 *res_compatible += remaining_size;
2970 } else {
2971 *res_precopy_only += remaining_size;
2975 static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
2977 unsigned int xh_len;
2978 int xh_flags;
2979 uint8_t *loaded_data;
2981 /* extract RLE header */
2982 xh_flags = qemu_get_byte(f);
2983 xh_len = qemu_get_be16(f);
2985 if (xh_flags != ENCODING_FLAG_XBZRLE) {
2986 error_report("Failed to load XBZRLE page - wrong compression!");
2987 return -1;
2990 if (xh_len > TARGET_PAGE_SIZE) {
2991 error_report("Failed to load XBZRLE page - len overflow!");
2992 return -1;
2994 loaded_data = XBZRLE.decoded_buf;
2995 /* load data and decode */
2996 /* it can change loaded_data to point to an internal buffer */
2997 qemu_get_buffer_in_place(f, &loaded_data, xh_len);
2999 /* decode RLE */
3000 if (xbzrle_decode_buffer(loaded_data, xh_len, host,
3001 TARGET_PAGE_SIZE) == -1) {
3002 error_report("Failed to load XBZRLE page - decode error!");
3003 return -1;
3006 return 0;
3010 * ram_block_from_stream: read a RAMBlock id from the migration stream
3012 * Must be called from within an RCU critical section.
3014 * Returns a pointer from within the RCU-protected ram_list.
3016 * @f: QEMUFile where to read the data from
3017 * @flags: Page flags (mostly to see if it's a continuation of previous block)
3019 static inline RAMBlock *ram_block_from_stream(QEMUFile *f, int flags)
3021 static RAMBlock *block = NULL;
3022 char id[256];
3023 uint8_t len;
3025 if (flags & RAM_SAVE_FLAG_CONTINUE) {
3026 if (!block) {
3027 error_report("Ack, bad migration stream!");
3028 return NULL;
3030 return block;
3033 len = qemu_get_byte(f);
3034 qemu_get_buffer(f, (uint8_t *)id, len);
3035 id[len] = 0;
3037 block = qemu_ram_block_by_name(id);
3038 if (!block) {
3039 error_report("Can't find block %s", id);
3040 return NULL;
3043 if (!qemu_ram_is_migratable(block)) {
3044 error_report("block %s should not be migrated !", id);
3045 return NULL;
3048 return block;
3051 static inline void *host_from_ram_block_offset(RAMBlock *block,
3052 ram_addr_t offset)
3054 if (!offset_in_ramblock(block, offset)) {
3055 return NULL;
3058 return block->host + offset;
3062 * ram_handle_compressed: handle the zero page case
3064 * If a page (or a whole RDMA chunk) has been
3065 * determined to be zero, then zap it.
3067 * @host: host address for the zero page
3068 * @ch: the byte the page is filled with; we only support zero
3069 * @size: size of the zero page
3071 void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
3073 if (ch != 0 || !is_zero_range(host, size)) {
3074 memset(host, ch, size);
3078 /* return the size after decompression, or negative value on error */
3079 static int
3080 qemu_uncompress_data(z_stream *stream, uint8_t *dest, size_t dest_len,
3081 const uint8_t *source, size_t source_len)
3083 int err;
3085 err = inflateReset(stream);
3086 if (err != Z_OK) {
3087 return -1;
3090 stream->avail_in = source_len;
3091 stream->next_in = (uint8_t *)source;
3092 stream->avail_out = dest_len;
3093 stream->next_out = dest;
3095 err = inflate(stream, Z_NO_FLUSH);
3096 if (err != Z_STREAM_END) {
3097 return -1;
3100 return stream->total_out;
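/*
 * Illustrative usage sketch (editor's addition): decompressing a single page
 * with a locally initialised zlib stream, instead of the per-thread streams
 * used by the decompress workers below.  The buffer names are hypothetical.
 */
static inline int sketch_uncompress_one_page(uint8_t *page, const uint8_t *src,
                                             size_t src_len)
{
    z_stream stream = {};
    int ret;

    if (inflateInit(&stream) != Z_OK) {
        return -1;
    }
    ret = qemu_uncompress_data(&stream, page, TARGET_PAGE_SIZE, src, src_len);
    inflateEnd(&stream);
    return ret;
}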
3103 static void *do_data_decompress(void *opaque)
3105 DecompressParam *param = opaque;
3106 unsigned long pagesize;
3107 uint8_t *des;
3108 int len, ret;
3110 qemu_mutex_lock(&param->mutex);
3111 while (!param->quit) {
3112 if (param->des) {
3113 des = param->des;
3114 len = param->len;
3115 param->des = 0;
3116 qemu_mutex_unlock(&param->mutex);
3118 pagesize = TARGET_PAGE_SIZE;
3120 ret = qemu_uncompress_data(&param->stream, des, pagesize,
3121 param->compbuf, len);
3122 if (ret < 0 && migrate_get_current()->decompress_error_check) {
3123 error_report("decompress data failed");
3124 qemu_file_set_error(decomp_file, ret);
3127 qemu_mutex_lock(&decomp_done_lock);
3128 param->done = true;
3129 qemu_cond_signal(&decomp_done_cond);
3130 qemu_mutex_unlock(&decomp_done_lock);
3132 qemu_mutex_lock(&param->mutex);
3133 } else {
3134 qemu_cond_wait(&param->cond, &param->mutex);
3137 qemu_mutex_unlock(&param->mutex);
3139 return NULL;
3142 static int wait_for_decompress_done(void)
3144 int idx, thread_count;
3146 if (!migrate_use_compression()) {
3147 return 0;
3150 thread_count = migrate_decompress_threads();
3151 qemu_mutex_lock(&decomp_done_lock);
3152 for (idx = 0; idx < thread_count; idx++) {
3153 while (!decomp_param[idx].done) {
3154 qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
3157 qemu_mutex_unlock(&decomp_done_lock);
3158 return qemu_file_get_error(decomp_file);
3161 static void compress_threads_load_cleanup(void)
3163 int i, thread_count;
3165 if (!migrate_use_compression()) {
3166 return;
3168 thread_count = migrate_decompress_threads();
3169 for (i = 0; i < thread_count; i++) {
3171 * we use it as an indicator of whether the thread is
3172 * properly initialized or not
3174 if (!decomp_param[i].compbuf) {
3175 break;
3178 qemu_mutex_lock(&decomp_param[i].mutex);
3179 decomp_param[i].quit = true;
3180 qemu_cond_signal(&decomp_param[i].cond);
3181 qemu_mutex_unlock(&decomp_param[i].mutex);
3183 for (i = 0; i < thread_count; i++) {
3184 if (!decomp_param[i].compbuf) {
3185 break;
3188 qemu_thread_join(decompress_threads + i);
3189 qemu_mutex_destroy(&decomp_param[i].mutex);
3190 qemu_cond_destroy(&decomp_param[i].cond);
3191 inflateEnd(&decomp_param[i].stream);
3192 g_free(decomp_param[i].compbuf);
3193 decomp_param[i].compbuf = NULL;
3195 g_free(decompress_threads);
3196 g_free(decomp_param);
3197 decompress_threads = NULL;
3198 decomp_param = NULL;
3199 decomp_file = NULL;
3202 static int compress_threads_load_setup(QEMUFile *f)
3204 int i, thread_count;
3206 if (!migrate_use_compression()) {
3207 return 0;
3210 thread_count = migrate_decompress_threads();
3211 decompress_threads = g_new0(QemuThread, thread_count);
3212 decomp_param = g_new0(DecompressParam, thread_count);
3213 qemu_mutex_init(&decomp_done_lock);
3214 qemu_cond_init(&decomp_done_cond);
3215 decomp_file = f;
3216 for (i = 0; i < thread_count; i++) {
3217 if (inflateInit(&decomp_param[i].stream) != Z_OK) {
3218 goto exit;
3221 decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
3222 qemu_mutex_init(&decomp_param[i].mutex);
3223 qemu_cond_init(&decomp_param[i].cond);
3224 decomp_param[i].done = true;
3225 decomp_param[i].quit = false;
3226 qemu_thread_create(decompress_threads + i, "decompress",
3227 do_data_decompress, decomp_param + i,
3228 QEMU_THREAD_JOINABLE);
3230 return 0;
3231 exit:
3232 compress_threads_load_cleanup();
3233 return -1;
3236 static void decompress_data_with_multi_threads(QEMUFile *f,
3237 void *host, int len)
3239 int idx, thread_count;
3241 thread_count = migrate_decompress_threads();
3242 qemu_mutex_lock(&decomp_done_lock);
3243 while (true) {
3244 for (idx = 0; idx < thread_count; idx++) {
3245 if (decomp_param[idx].done) {
3246 decomp_param[idx].done = false;
3247 qemu_mutex_lock(&decomp_param[idx].mutex);
3248 qemu_get_buffer(f, decomp_param[idx].compbuf, len);
3249 decomp_param[idx].des = host;
3250 decomp_param[idx].len = len;
3251 qemu_cond_signal(&decomp_param[idx].cond);
3252 qemu_mutex_unlock(&decomp_param[idx].mutex);
3253 break;
3256 if (idx < thread_count) {
3257 break;
3258 } else {
3259 qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
3262 qemu_mutex_unlock(&decomp_done_lock);
3266 * ram_load_setup: Setup RAM for migration incoming side
3268 * Returns zero to indicate success and negative for error
3270 * @f: QEMUFile where to receive the data
3271 * @opaque: RAMState pointer
3273 static int ram_load_setup(QEMUFile *f, void *opaque)
3275 if (compress_threads_load_setup(f)) {
3276 return -1;
3279 xbzrle_load_setup();
3280 ramblock_recv_map_init();
3281 return 0;
3284 static int ram_load_cleanup(void *opaque)
3286 RAMBlock *rb;
3287 xbzrle_load_cleanup();
3288 compress_threads_load_cleanup();
3290 RAMBLOCK_FOREACH_MIGRATABLE(rb) {
3291 g_free(rb->receivedmap);
3292 rb->receivedmap = NULL;
3294 return 0;
3298 * ram_postcopy_incoming_init: allocate postcopy data structures
3300 * Returns 0 for success and negative if there was one error
3302 * @mis: current migration incoming state
3304 * Allocate data structures etc needed by incoming migration with
3305 * postcopy-ram. postcopy-ram's similarly named
3306 * postcopy_ram_incoming_init does the work.
3308 int ram_postcopy_incoming_init(MigrationIncomingState *mis)
3310 unsigned long ram_pages = last_ram_page();
3312 return postcopy_ram_incoming_init(mis, ram_pages);
3316 * ram_load_postcopy: load a page in postcopy case
3318 * Returns 0 for success or -errno in case of error
3320 * Called in postcopy mode by ram_load().
3321 * rcu_read_lock is taken prior to this being called.
3323 * @f: QEMUFile to receive the data from
3325 static int ram_load_postcopy(QEMUFile *f)
3327 int flags = 0, ret = 0;
3328 bool place_needed = false;
3329 bool matching_page_sizes = false;
3330 MigrationIncomingState *mis = migration_incoming_get_current();
3331 /* Temporary page that is later 'placed' */
3332 void *postcopy_host_page = postcopy_get_tmp_page(mis);
3333 void *last_host = NULL;
3334 bool all_zero = false;
3336 while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
3337 ram_addr_t addr;
3338 void *host = NULL;
3339 void *page_buffer = NULL;
3340 void *place_source = NULL;
3341 RAMBlock *block = NULL;
3342 uint8_t ch;
3344 addr = qemu_get_be64(f);
3347 * If there is a QEMUFile error, we should stop here; "addr"
3348 * may be invalid
3350 ret = qemu_file_get_error(f);
3351 if (ret) {
3352 break;
3355 flags = addr & ~TARGET_PAGE_MASK;
3356 addr &= TARGET_PAGE_MASK;
3358 trace_ram_load_postcopy_loop((uint64_t)addr, flags);
3359 place_needed = false;
3360 if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE)) {
3361 block = ram_block_from_stream(f, flags);
3363 host = host_from_ram_block_offset(block, addr);
3364 if (!host) {
3365 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
3366 ret = -EINVAL;
3367 break;
3369 matching_page_sizes = block->page_size == TARGET_PAGE_SIZE;
3371 * Postcopy requires that we place whole host pages atomically;
3372 * these may be huge pages for RAMBlocks that are backed by
3373 * hugetlbfs.
3374 * To make it atomic, the data is read into a temporary page
3375 * that's moved into place later.
3376 * The migration protocol uses, possibly smaller, target pages;
3377 * however, the source ensures it always sends all the components
3378 * of a host page in order.
3380 page_buffer = postcopy_host_page +
3381 ((uintptr_t)host & (block->page_size - 1));
3382 /* If all TP are zero then we can optimise the place */
3383 if (!((uintptr_t)host & (block->page_size - 1))) {
3384 all_zero = true;
3385 } else {
3386 /* not the 1st TP within the HP */
3387 if (host != (last_host + TARGET_PAGE_SIZE)) {
3388 error_report("Non-sequential target page %p/%p",
3389 host, last_host);
3390 ret = -EINVAL;
3391 break;
3397 * If it's the last part of a host page then we place the host
3398 * page
3400 place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
3401 (block->page_size - 1)) == 0;
3402 place_source = postcopy_host_page;
3404 last_host = host;
3406 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
3407 case RAM_SAVE_FLAG_ZERO:
3408 ch = qemu_get_byte(f);
3409 memset(page_buffer, ch, TARGET_PAGE_SIZE);
3410 if (ch) {
3411 all_zero = false;
3413 break;
3415 case RAM_SAVE_FLAG_PAGE:
3416 all_zero = false;
3417 if (!place_needed || !matching_page_sizes) {
3418 qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
3419 } else {
3420 /* Avoids the qemu_file copy during postcopy, which is
3421 * going to do a copy later; can only do it when we
3422 * do this read in one go (matching page sizes)
3424 qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
3425 TARGET_PAGE_SIZE);
3427 break;
3428 case RAM_SAVE_FLAG_EOS:
3429 /* normal exit */
3430 break;
3431 default:
3432 error_report("Unknown combination of migration flags: %#x"
3433 " (postcopy mode)", flags);
3434 ret = -EINVAL;
3435 break;
3438 /* Detect any possible file errors */
3439 if (!ret && qemu_file_get_error(f)) {
3440 ret = qemu_file_get_error(f);
3443 if (!ret && place_needed) {
3444 /* This gets called at the last target page in the host page */
3445 void *place_dest = host + TARGET_PAGE_SIZE - block->page_size;
3447 if (all_zero) {
3448 ret = postcopy_place_page_zero(mis, place_dest,
3449 block);
3450 } else {
3451 ret = postcopy_place_page(mis, place_dest,
3452 place_source, block);
3457 return ret;
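/*
 * Illustrative sketch (editor's addition): ram_load_postcopy() above
 * accumulates target pages into a temporary host page and only places it
 * once the last target page has arrived.  These hypothetical helpers show
 * the two address computations it relies on.
 */
static inline void *sketch_offset_in_tmp_host_page(void *tmp_host_page,
                                                   void *host,
                                                   size_t host_page_size)
{
    /* where this target page lives inside the temporary host page */
    return (uint8_t *)tmp_host_page + ((uintptr_t)host & (host_page_size - 1));
}

static inline bool sketch_is_last_target_page(void *host, size_t host_page_size)
{
    /* true when 'host' is the final target page of its host page */
    return (((uintptr_t)host + TARGET_PAGE_SIZE) & (host_page_size - 1)) == 0;
}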
3460 static bool postcopy_is_advised(void)
3462 PostcopyState ps = postcopy_state_get();
3463 return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END;
3466 static bool postcopy_is_running(void)
3468 PostcopyState ps = postcopy_state_get();
3469 return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
3472 static int ram_load(QEMUFile *f, void *opaque, int version_id)
3474 int flags = 0, ret = 0, invalid_flags = 0;
3475 static uint64_t seq_iter;
3476 int len = 0;
3478 * If the system is running in postcopy mode, page inserts into host memory
3479 * must be atomic
3481 bool postcopy_running = postcopy_is_running();
3482 /* ADVISE comes earlier; it shows the source has the postcopy capability enabled */
3483 bool postcopy_advised = postcopy_is_advised();
3485 seq_iter++;
3487 if (version_id != 4) {
3488 ret = -EINVAL;
3491 if (!migrate_use_compression()) {
3492 invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
3494 /* This RCU critical section can be very long running.
3495 * When RCU reclaims in the code start to become numerous,
3496 * it will be necessary to reduce the granularity of this
3497 * critical section.
3499 rcu_read_lock();
3501 if (postcopy_running) {
3502 ret = ram_load_postcopy(f);
3505 while (!postcopy_running && !ret && !(flags & RAM_SAVE_FLAG_EOS)) {
3506 ram_addr_t addr, total_ram_bytes;
3507 void *host = NULL;
3508 uint8_t ch;
3510 addr = qemu_get_be64(f);
3511 flags = addr & ~TARGET_PAGE_MASK;
3512 addr &= TARGET_PAGE_MASK;
3514 if (flags & invalid_flags) {
3515 if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
3516 error_report("Received an unexpected compressed page");
3519 ret = -EINVAL;
3520 break;
3523 if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
3524 RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
3525 RAMBlock *block = ram_block_from_stream(f, flags);
3527 host = host_from_ram_block_offset(block, addr);
3528 if (!host) {
3529 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
3530 ret = -EINVAL;
3531 break;
3533 ramblock_recv_bitmap_set(block, host);
3534 trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
3537 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
3538 case RAM_SAVE_FLAG_MEM_SIZE:
3539 /* Synchronize RAM block list */
3540 total_ram_bytes = addr;
3541 while (!ret && total_ram_bytes) {
3542 RAMBlock *block;
3543 char id[256];
3544 ram_addr_t length;
3546 len = qemu_get_byte(f);
3547 qemu_get_buffer(f, (uint8_t *)id, len);
3548 id[len] = 0;
3549 length = qemu_get_be64(f);
3551 block = qemu_ram_block_by_name(id);
3552 if (block && !qemu_ram_is_migratable(block)) {
3553 error_report("block %s should not be migrated !", id);
3554 ret = -EINVAL;
3555 } else if (block) {
3556 if (length != block->used_length) {
3557 Error *local_err = NULL;
3559 ret = qemu_ram_resize(block, length,
3560 &local_err);
3561 if (local_err) {
3562 error_report_err(local_err);
3565 /* For postcopy we need to check hugepage sizes match */
3566 if (postcopy_advised &&
3567 block->page_size != qemu_host_page_size) {
3568 uint64_t remote_page_size = qemu_get_be64(f);
3569 if (remote_page_size != block->page_size) {
3570 error_report("Mismatched RAM page size %s "
3571 "(local) %zd != %" PRId64,
3572 id, block->page_size,
3573 remote_page_size);
3574 ret = -EINVAL;
3577 ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
3578 block->idstr);
3579 } else {
3580 error_report("Unknown ramblock \"%s\", cannot "
3581 "accept migration", id);
3582 ret = -EINVAL;
3585 total_ram_bytes -= length;
3587 break;
3589 case RAM_SAVE_FLAG_ZERO:
3590 ch = qemu_get_byte(f);
3591 ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
3592 break;
3594 case RAM_SAVE_FLAG_PAGE:
3595 qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
3596 break;
3598 case RAM_SAVE_FLAG_COMPRESS_PAGE:
3599 len = qemu_get_be32(f);
3600 if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
3601 error_report("Invalid compressed data length: %d", len);
3602 ret = -EINVAL;
3603 break;
3605 decompress_data_with_multi_threads(f, host, len);
3606 break;
3608 case RAM_SAVE_FLAG_XBZRLE:
3609 if (load_xbzrle(f, addr, host) < 0) {
3610 error_report("Failed to decompress XBZRLE page at "
3611 RAM_ADDR_FMT, addr);
3612 ret = -EINVAL;
3613 break;
3615 break;
3616 case RAM_SAVE_FLAG_EOS:
3617 /* normal exit */
3618 break;
3619 default:
3620 if (flags & RAM_SAVE_FLAG_HOOK) {
3621 ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
3622 } else {
3623 error_report("Unknown combination of migration flags: %#x",
3624 flags);
3625 ret = -EINVAL;
3628 if (!ret) {
3629 ret = qemu_file_get_error(f);
3633 ret |= wait_for_decompress_done();
3634 rcu_read_unlock();
3635 trace_ram_load_complete(ret, seq_iter);
3636 return ret;
3639 static bool ram_has_postcopy(void *opaque)
3641 return migrate_postcopy_ram();
3644 /* Sync all the dirty bitmap with destination VM. */
3645 static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
3647 RAMBlock *block;
3648 QEMUFile *file = s->to_dst_file;
3649 int ramblock_count = 0;
3651 trace_ram_dirty_bitmap_sync_start();
3653 RAMBLOCK_FOREACH_MIGRATABLE(block) {
3654 qemu_savevm_send_recv_bitmap(file, block->idstr);
3655 trace_ram_dirty_bitmap_request(block->idstr);
3656 ramblock_count++;
3659 trace_ram_dirty_bitmap_sync_wait();
3661 /* Wait until all the ramblocks' dirty bitmaps are synced */
3662 while (ramblock_count--) {
3663 qemu_sem_wait(&s->rp_state.rp_sem);
3666 trace_ram_dirty_bitmap_sync_complete();
3668 return 0;
3671 static void ram_dirty_bitmap_reload_notify(MigrationState *s)
3673 qemu_sem_post(&s->rp_state.rp_sem);
3677 * Read the received bitmap and invert it to form the initial dirty bitmap.
3678 * This is only used when the postcopy migration is paused but wants
3679 * to resume from a middle point.
3681 int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
3683 int ret = -EINVAL;
3684 QEMUFile *file = s->rp_state.from_dst_file;
3685 unsigned long *le_bitmap, nbits = block->used_length >> TARGET_PAGE_BITS;
3686 uint64_t local_size = nbits / 8;
3687 uint64_t size, end_mark;
3689 trace_ram_dirty_bitmap_reload_begin(block->idstr);
3691 if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
3692 error_report("%s: incorrect state %s", __func__,
3693 MigrationStatus_str(s->state));
3694 return -EINVAL;
3698 * Note: see comments in ramblock_recv_bitmap_send() on why we
3699 * need the endianness conversion, and the padding.
3701 local_size = ROUND_UP(local_size, 8);
3703 /* Add paddings */
3704 le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
3706 size = qemu_get_be64(file);
3708 /* The size of the bitmap should match with our ramblock */
3709 if (size != local_size) {
3710 error_report("%s: ramblock '%s' bitmap size mismatch "
3711 "(0x%"PRIx64" != 0x%"PRIx64")", __func__,
3712 block->idstr, size, local_size);
3713 ret = -EINVAL;
3714 goto out;
3717 size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size);
3718 end_mark = qemu_get_be64(file);
3720 ret = qemu_file_get_error(file);
3721 if (ret || size != local_size) {
3722 error_report("%s: read bitmap failed for ramblock '%s': %d"
3723 " (size 0x%"PRIx64", got: 0x%"PRIx64")",
3724 __func__, block->idstr, ret, local_size, size);
3725 ret = -EIO;
3726 goto out;
3729 if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
3730 error_report("%s: ramblock '%s' end mark incorrect: 0x%"PRIu64,
3731 __func__, block->idstr, end_mark);
3732 ret = -EINVAL;
3733 goto out;
3737 * Endianness conversion. We are in postcopy (though paused).
3738 * The dirty bitmap won't change, so we can modify it directly.
3740 bitmap_from_le(block->bmap, le_bitmap, nbits);
3743 * What we received is the "received bitmap". Invert it to form the
3744 * initial dirty bitmap for this ramblock.
3746 bitmap_complement(block->bmap, block->bmap, nbits);
3748 trace_ram_dirty_bitmap_reload_complete(block->idstr);
3751 * We succeeded in syncing the bitmap for the current ramblock. If this
3752 * is the last one to sync, we need to notify the main send thread.
3754 ram_dirty_bitmap_reload_notify(s);
3756 ret = 0;
3757 out:
3758 g_free(le_bitmap);
3759 return ret;
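/*
 * Illustrative sketch (editor's addition): the bitmap received above is
 * sized from the block length in bytes and rounded up to a multiple of
 * 8 bytes, which is why ram_dirty_bitmap_reload() pads its allocation by an
 * extra BITS_PER_LONG bits.  The helper name is hypothetical.
 */
static inline uint64_t sketch_recv_bitmap_wire_size(RAMBlock *rb)
{
    uint64_t nbits = rb->used_length >> TARGET_PAGE_BITS;

    return ROUND_UP(nbits / 8, 8);      /* bytes on the wire, 8-byte aligned */
}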
3762 static int ram_resume_prepare(MigrationState *s, void *opaque)
3764 RAMState *rs = *(RAMState **)opaque;
3765 int ret;
3767 ret = ram_dirty_bitmap_sync_all(s, rs);
3768 if (ret) {
3769 return ret;
3772 ram_state_resume_prepare(rs, s->to_dst_file);
3774 return 0;
3777 static SaveVMHandlers savevm_ram_handlers = {
3778 .save_setup = ram_save_setup,
3779 .save_live_iterate = ram_save_iterate,
3780 .save_live_complete_postcopy = ram_save_complete,
3781 .save_live_complete_precopy = ram_save_complete,
3782 .has_postcopy = ram_has_postcopy,
3783 .save_live_pending = ram_save_pending,
3784 .load_state = ram_load,
3785 .save_cleanup = ram_save_cleanup,
3786 .load_setup = ram_load_setup,
3787 .load_cleanup = ram_load_cleanup,
3788 .resume_prepare = ram_resume_prepare,
3791 void ram_mig_init(void)
3793 qemu_mutex_init(&XBZRLE.lock);
3794 register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, &ram_state);