/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/types.h>
#include "config.h"
#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "sysemu/arch_init.h"
#include "audio/audio.h"
#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/audio/audio.h"
#include "sysemu/kvm.h"
#include "migration/migration.h"
#include "hw/i386/smbios.h"
#include "exec/address-spaces.h"
#include "hw/audio/pcspk.h"
#include "migration/page_cache.h"
#include "qemu/config-file.h"
#include "qmp-commands.h"
#include "trace.h"
#include "exec/cpu-all.h"
#include "exec/ram_addr.h"
#include "hw/acpi/acpi.h"

#ifdef DEBUG_ARCH_INIT
#define DPRINTF(fmt, ...) \
    do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifdef TARGET_SPARC
int graphic_width = 1024;
int graphic_height = 768;
int graphic_depth = 8;
#else
int graphic_width = 800;
int graphic_height = 600;
int graphic_depth = 32;
#endif

#if defined(TARGET_ALPHA)
#define QEMU_ARCH QEMU_ARCH_ALPHA
#elif defined(TARGET_ARM)
#define QEMU_ARCH QEMU_ARCH_ARM
#elif defined(TARGET_CRIS)
#define QEMU_ARCH QEMU_ARCH_CRIS
#elif defined(TARGET_I386)
#define QEMU_ARCH QEMU_ARCH_I386
#elif defined(TARGET_M68K)
#define QEMU_ARCH QEMU_ARCH_M68K
#elif defined(TARGET_LM32)
#define QEMU_ARCH QEMU_ARCH_LM32
#elif defined(TARGET_MICROBLAZE)
#define QEMU_ARCH QEMU_ARCH_MICROBLAZE
#elif defined(TARGET_MIPS)
#define QEMU_ARCH QEMU_ARCH_MIPS
#elif defined(TARGET_MOXIE)
#define QEMU_ARCH QEMU_ARCH_MOXIE
#elif defined(TARGET_OPENRISC)
#define QEMU_ARCH QEMU_ARCH_OPENRISC
#elif defined(TARGET_PPC)
#define QEMU_ARCH QEMU_ARCH_PPC
#elif defined(TARGET_S390X)
#define QEMU_ARCH QEMU_ARCH_S390X
#elif defined(TARGET_SH4)
#define QEMU_ARCH QEMU_ARCH_SH4
#elif defined(TARGET_SPARC)
#define QEMU_ARCH QEMU_ARCH_SPARC
#elif defined(TARGET_XTENSA)
#define QEMU_ARCH QEMU_ARCH_XTENSA
#elif defined(TARGET_UNICORE32)
#define QEMU_ARCH QEMU_ARCH_UNICORE32
#endif

const uint32_t arch_type = QEMU_ARCH;
static bool mig_throttle_on;
static int dirty_rate_high_cnt;

static void check_guest_throttling(void);

/***********************************************************/
/* ram save/restore */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_COMPRESS 0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in migration.h; start with 0x100 next */

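/*
 * Each page record on the wire starts with an 8-byte header word: the
 * page's offset, which is TARGET_PAGE_SIZE aligned, leaving the low bits
 * free to carry the RAM_SAVE_FLAG_* bits above.  A minimal sketch of how
 * the load side splits that word apart again (mirroring ram_load() below):
 *
 *     uint64_t header = qemu_get_be64(f);
 *     int flags       = header & ~TARGET_PAGE_MASK;
 *     ram_addr_t addr = header & TARGET_PAGE_MASK;
 */
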
static struct defconfig_file {
    const char *filename;
    /* Indicates it is a user config file (disabled by -no-user-config) */
    bool userconfig;
} default_config_files[] = {
    { CONFIG_QEMU_CONFDIR "/qemu.conf",                   true },
    { CONFIG_QEMU_CONFDIR "/target-" TARGET_NAME ".conf", true },
    { NULL }, /* end of list */
};

int qemu_read_default_config_files(bool userconfig)
{
    int ret;
    struct defconfig_file *f;

    for (f = default_config_files; f->filename; f++) {
        if (!userconfig && f->userconfig) {
            continue;
        }
        ret = qemu_read_config_file(f->filename);
        if (ret < 0 && ret != -ENOENT) {
            return ret;
        }
    }

    return 0;
}

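/*
 * A sketch of the intended call from startup code (the exact call site
 * lives outside this file): pass userconfig=false when the user gave
 * -no-user-config, so entries flagged userconfig above are skipped.
 *
 *     if (qemu_read_default_config_files(userconfig) < 0) {
 *         exit(1);
 *     }
 */
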
static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
    return buffer_find_nonzero_offset(p, size) == size;
}

/* struct contains XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* buffer used for XBZRLE decoding */
    uint8_t *decoded_buf;
    /* Cache for XBZRLE */
    PageCache *cache;
} XBZRLE;

int64_t xbzrle_cache_resize(int64_t new_size)
{
    if (XBZRLE.cache != NULL) {
        return cache_resize(XBZRLE.cache, new_size / TARGET_PAGE_SIZE) *
            TARGET_PAGE_SIZE;
    }
    return pow2floor(new_size);
}

/* accounting for migration statistics */
typedef struct AccountingInfo {
    uint64_t dup_pages;
    uint64_t skipped_pages;
    uint64_t norm_pages;
    uint64_t iterations;
    uint64_t xbzrle_bytes;
    uint64_t xbzrle_pages;
    uint64_t xbzrle_cache_miss;
    uint64_t xbzrle_overflows;
} AccountingInfo;

static AccountingInfo acct_info;

static void acct_clear(void)
{
    memset(&acct_info, 0, sizeof(acct_info));
}

uint64_t dup_mig_bytes_transferred(void)
{
    return acct_info.dup_pages * TARGET_PAGE_SIZE;
}

uint64_t dup_mig_pages_transferred(void)
{
    return acct_info.dup_pages;
}

uint64_t skipped_mig_bytes_transferred(void)
{
    return acct_info.skipped_pages * TARGET_PAGE_SIZE;
}

uint64_t skipped_mig_pages_transferred(void)
{
    return acct_info.skipped_pages;
}

uint64_t norm_mig_bytes_transferred(void)
{
    return acct_info.norm_pages * TARGET_PAGE_SIZE;
}

uint64_t norm_mig_pages_transferred(void)
{
    return acct_info.norm_pages;
}

uint64_t xbzrle_mig_bytes_transferred(void)
{
    return acct_info.xbzrle_bytes;
}

uint64_t xbzrle_mig_pages_transferred(void)
{
    return acct_info.xbzrle_pages;
}

uint64_t xbzrle_mig_pages_cache_miss(void)
{
    return acct_info.xbzrle_cache_miss;
}

uint64_t xbzrle_mig_pages_overflow(void)
{
    return acct_info.xbzrle_overflows;
}

static size_t save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                             int cont, int flag)
{
    size_t size;

    qemu_put_be64(f, offset | cont | flag);
    size = 8;

    if (!cont) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr,
                        strlen(block->idstr));
        size += 1 + strlen(block->idstr);
    }
    return size;
}

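/*
 * A full page record written through save_block_hdr() is therefore:
 * an 8-byte header word (offset | cont | flag), then, unless
 * RAM_SAVE_FLAG_CONTINUE is set, a one-byte idstr length followed by
 * the idstr itself, and finally whatever payload the caller appends
 * (a raw page, a single fill byte, or an XBZRLE-encoded buffer).
 */
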
#define ENCODING_FLAG_XBZRLE 0x1

static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, int cont, bool last_stage)
{
    int encoded_len = 0, bytes_sent = -1;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr)) {
        if (!last_stage) {
            cache_insert(XBZRLE.cache, current_addr, current_data);
        }
        acct_info.xbzrle_cache_miss++;
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        DPRINTF("Skipping unmodified page\n");
        return 0;
    } else if (encoded_len == -1) {
        DPRINTF("Overflow\n");
        acct_info.xbzrle_overflows++;
        /* update data in the cache */
        memcpy(prev_cached_page, current_data, TARGET_PAGE_SIZE);
        return -1;
    }

    /* we need to update the data in the cache, in order to get the same data */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send XBZRLE based compressed page */
    bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(f, encoded_len);
    qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
    bytes_sent += encoded_len + 1 + 2;
    acct_info.xbzrle_pages++;
    acct_info.xbzrle_bytes += bytes_sent;

    return bytes_sent;
}

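/*
 * Return convention relied on by ram_save_block() below: -1 means "fall
 * back to sending a normal page" (cache miss or encoding overflow),
 * 0 means the page was unmodified and nothing needs to be sent, and a
 * positive value is the number of bytes actually put on the wire.
 */
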
/* This is the last block that we have visited searching for dirty pages
 */
static RAMBlock *last_seen_block;
/* This is the last block from where we have sent data */
static RAMBlock *last_sent_block;
static ram_addr_t last_offset;
static unsigned long *migration_bitmap;
static uint64_t migration_dirty_pages;
static uint32_t last_version;
static bool ram_bulk_stage;

static inline
ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
                                                 ram_addr_t start)
{
    unsigned long base = mr->ram_addr >> TARGET_PAGE_BITS;
    unsigned long nr = base + (start >> TARGET_PAGE_BITS);
    uint64_t mr_size = TARGET_PAGE_ALIGN(memory_region_size(mr));
    unsigned long size = base + (mr_size >> TARGET_PAGE_BITS);

    unsigned long next;

    if (ram_bulk_stage && nr > base) {
        next = nr + 1;
    } else {
        next = find_next_bit(migration_bitmap, size, nr);
    }

    if (next < size) {
        clear_bit(next, migration_bitmap);
        migration_dirty_pages--;
    }
    return (next - base) << TARGET_PAGE_BITS;
}

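/*
 * The nr + 1 shortcut above works because during the bulk stage every
 * page is still marked dirty and pages are streamed strictly in order,
 * so the next page to send is simply the next page.  Only once the
 * first complete pass is done does the find_next_bit() scan take over.
 */
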
static inline bool migration_bitmap_set_dirty(MemoryRegion *mr,
                                              ram_addr_t offset)
{
    bool ret;
    int nr = (mr->ram_addr + offset) >> TARGET_PAGE_BITS;

    ret = test_and_set_bit(nr, migration_bitmap);

    if (!ret) {
        migration_dirty_pages++;
    }
    return ret;
}

/* Needs iothread lock! */

static void migration_bitmap_sync(void)
{
    RAMBlock *block;
    ram_addr_t addr;
    uint64_t num_dirty_pages_init = migration_dirty_pages;
    MigrationState *s = migrate_get_current();
    static int64_t start_time;
    static int64_t bytes_xfer_prev;
    static int64_t num_dirty_pages_period;
    int64_t end_time;
    int64_t bytes_xfer_now;

    if (!bytes_xfer_prev) {
        bytes_xfer_prev = ram_bytes_transferred();
    }

    if (!start_time) {
        start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    address_space_sync_dirty_bitmap(&address_space_memory);

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        for (addr = 0; addr < block->length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_get_dirty(block->mr->ram_addr + addr,
                                              TARGET_PAGE_SIZE,
                                              DIRTY_MEMORY_MIGRATION)) {
                cpu_physical_memory_reset_dirty(block->mr->ram_addr + addr,
                                                TARGET_PAGE_SIZE,
                                                DIRTY_MEMORY_MIGRATION);
                migration_bitmap_set_dirty(block->mr, addr);
            }
        }
    }
    trace_migration_bitmap_sync_end(migration_dirty_pages
                                    - num_dirty_pages_init);
    num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > start_time + 1000) {
        if (migrate_auto_converge()) {
            /* The following detection logic can be refined later. For now:
               check whether the bytes dirtied in this period exceed 50% of
               the bytes transferred since the last time we were in this
               routine. If that happens >N times (for now N==4) we turn on
               the throttle down logic */
            bytes_xfer_now = ram_bytes_transferred();
            if (s->dirty_pages_rate &&
                (num_dirty_pages_period * TARGET_PAGE_SIZE >
                 (bytes_xfer_now - bytes_xfer_prev) / 2) &&
                (dirty_rate_high_cnt++ > 4)) {
                trace_migration_throttle();
                mig_throttle_on = true;
                dirty_rate_high_cnt = 0;
            }
            bytes_xfer_prev = bytes_xfer_now;
        } else {
            mig_throttle_on = false;
        }
        s->dirty_pages_rate = num_dirty_pages_period * 1000
            / (end_time - start_time);
        s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
        start_time = end_time;
        num_dirty_pages_period = 0;
    }
}

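/*
 * A worked example of the detection above: if 100 MB were transferred
 * since the previous sync (bytes_xfer_now - bytes_xfer_prev) and more
 * than 50 MB of pages were dirtied in the same period, the
 * dirty_rate_high_cnt counter is bumped; once that has happened on more
 * than four syncs, mig_throttle_on is set and check_guest_throttling()
 * starts stalling the VCPUs so the migration thread can catch up.
 */
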
/*
 * ram_save_block: Writes a page of memory to the stream f
 *
 * Returns:  The number of bytes written.
 *           0 means no dirty pages
 */

static int ram_save_block(QEMUFile *f, bool last_stage)
{
    RAMBlock *block = last_seen_block;
    ram_addr_t offset = last_offset;
    bool complete_round = false;
    int bytes_sent = 0;
    MemoryRegion *mr;
    ram_addr_t current_addr;

    if (!block) {
        block = QTAILQ_FIRST(&ram_list.blocks);
    }

    while (true) {
        mr = block->mr;
        offset = migration_bitmap_find_and_reset_dirty(mr, offset);
        if (complete_round && block == last_seen_block &&
            offset >= last_offset) {
            break;
        }
        if (offset >= block->length) {
            offset = 0;
            block = QTAILQ_NEXT(block, next);
            if (!block) {
                block = QTAILQ_FIRST(&ram_list.blocks);
                complete_round = true;
                ram_bulk_stage = false;
            }
        } else {
            int ret;
            uint8_t *p;
            int cont = (block == last_sent_block) ?
                RAM_SAVE_FLAG_CONTINUE : 0;

            p = memory_region_get_ram_ptr(mr) + offset;

            /* When in doubt, send the page as a normal page */
            bytes_sent = -1;
            ret = ram_control_save_page(f, block->offset,
                                        offset, TARGET_PAGE_SIZE, &bytes_sent);

            if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
                if (ret != RAM_SAVE_CONTROL_DELAYED) {
                    if (bytes_sent > 0) {
                        acct_info.norm_pages++;
                    } else if (bytes_sent == 0) {
                        acct_info.dup_pages++;
                    }
                }
            } else if (is_zero_range(p, TARGET_PAGE_SIZE)) {
                acct_info.dup_pages++;
                bytes_sent = save_block_hdr(f, block, offset, cont,
                                            RAM_SAVE_FLAG_COMPRESS);
                qemu_put_byte(f, 0);
                bytes_sent++;
            } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
                current_addr = block->offset + offset;
                bytes_sent = save_xbzrle_page(f, p, current_addr, block,
                                              offset, cont, last_stage);
                if (!last_stage) {
                    p = get_cached_data(XBZRLE.cache, current_addr);
                }
            }

            /* XBZRLE overflow or normal page */
            if (bytes_sent == -1) {
                bytes_sent = save_block_hdr(f, block, offset, cont,
                                            RAM_SAVE_FLAG_PAGE);
                qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
                bytes_sent += TARGET_PAGE_SIZE;
                acct_info.norm_pages++;
            }

            /* if page is unmodified, continue to the next */
            if (bytes_sent > 0) {
                last_sent_block = block;
                break;
            }
        }
    }
    last_seen_block = block;
    last_offset = offset;

    return bytes_sent;
}

static uint64_t bytes_transferred;

void acct_update_position(QEMUFile *f, size_t size, bool zero)
{
    uint64_t pages = size / TARGET_PAGE_SIZE;
    if (zero) {
        acct_info.dup_pages += pages;
    } else {
        acct_info.norm_pages += pages;
        bytes_transferred += size;
        qemu_update_position(f, size);
    }
}

static ram_addr_t ram_save_remaining(void)
{
    return migration_dirty_pages;
}

uint64_t ram_bytes_remaining(void)
{
    return ram_save_remaining() * TARGET_PAGE_SIZE;
}

uint64_t ram_bytes_transferred(void)
{
    return bytes_transferred;
}

uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        total += block->length;

    return total;
}

static void migration_end(void)
{
    if (migration_bitmap) {
        memory_global_dirty_log_stop();
        g_free(migration_bitmap);
        migration_bitmap = NULL;
    }

    if (XBZRLE.cache) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        g_free(XBZRLE.decoded_buf);
        XBZRLE.cache = NULL;
    }
}

static void ram_migration_cancel(void *opaque)
{
    migration_end();
}

static void reset_ram_globals(void)
{
    last_seen_block = NULL;
    last_sent_block = NULL;
    last_offset = 0;
    last_version = ram_list.version;
    ram_bulk_stage = true;
}

#define MAX_WAIT 50 /* ms, half buffered_file limit */

static int ram_save_setup(QEMUFile *f, void *opaque)
{
    RAMBlock *block;
    int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;

    migration_bitmap = bitmap_new(ram_pages);
    bitmap_set(migration_bitmap, 0, ram_pages);
    migration_dirty_pages = ram_pages;
    mig_throttle_on = false;
    dirty_rate_high_cnt = 0;

    if (migrate_use_xbzrle()) {
        XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
                                  TARGET_PAGE_SIZE,
                                  TARGET_PAGE_SIZE);
        if (!XBZRLE.cache) {
            DPRINTF("Error creating cache\n");
            return -1;
        }
        XBZRLE.encoded_buf = g_malloc0(TARGET_PAGE_SIZE);
        XBZRLE.current_buf = g_malloc(TARGET_PAGE_SIZE);
        acct_clear();
    }

    qemu_mutex_lock_iothread();
    qemu_mutex_lock_ramlist();
    bytes_transferred = 0;
    reset_ram_globals();

    memory_global_dirty_log_start();
    migration_bitmap_sync();
    qemu_mutex_unlock_iothread();

    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
        qemu_put_be64(f, block->length);
    }

    qemu_mutex_unlock_ramlist();

    ram_control_before_iterate(f, RAM_CONTROL_SETUP);
    ram_control_after_iterate(f, RAM_CONTROL_SETUP);

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}

static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int i;
    int64_t t0;
    int total_sent = 0;

    qemu_mutex_lock_ramlist();

    if (ram_list.version != last_version) {
        reset_ram_globals();
    }

    ram_control_before_iterate(f, RAM_CONTROL_ROUND);

    t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    i = 0;
    while ((ret = qemu_file_rate_limit(f)) == 0) {
        int bytes_sent;

        bytes_sent = ram_save_block(f, false);
        /* no more blocks to send */
        if (bytes_sent == 0) {
            break;
        }
        total_sent += bytes_sent;
        acct_info.iterations++;
        check_guest_throttling();
        /* we want to check in the 1st loop, just in case it was the 1st time
           and we had to sync the dirty bitmap.
           qemu_get_clock_ns() is a bit expensive, so we only check every few
           iterations
        */
        if ((i & 63) == 0) {
            uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
            if (t1 > MAX_WAIT) {
                DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
                        t1, i);
                break;
            }
        }
        i++;
    }

    qemu_mutex_unlock_ramlist();

    /*
     * Must occur before EOS (or any QEMUFile operation)
     * because of RDMA protocol.
     */
    ram_control_after_iterate(f, RAM_CONTROL_ROUND);

    bytes_transferred += total_sent;

    /*
     * Do not count these 8 bytes into total_sent, so that we can
     * return 0 if no page had been dirtied.
     */
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    bytes_transferred += 8;

    ret = qemu_file_get_error(f);
    if (ret < 0) {
        return ret;
    }

    return total_sent;
}

static int ram_save_complete(QEMUFile *f, void *opaque)
{
    qemu_mutex_lock_ramlist();
    migration_bitmap_sync();

    ram_control_before_iterate(f, RAM_CONTROL_FINISH);

    /* try transferring iterative blocks of memory */

    /* flush all remaining blocks regardless of rate limiting */
    while (true) {
        int bytes_sent;

        bytes_sent = ram_save_block(f, true);
        /* no more blocks to send */
        if (bytes_sent == 0) {
            break;
        }
        bytes_transferred += bytes_sent;
    }

    ram_control_after_iterate(f, RAM_CONTROL_FINISH);
    migration_end();

    qemu_mutex_unlock_ramlist();
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}

static uint64_t ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
{
    uint64_t remaining_size;

    remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;

    if (remaining_size < max_size) {
        qemu_mutex_lock_iothread();
        migration_bitmap_sync();
        qemu_mutex_unlock_iothread();
        remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
    }
    return remaining_size;
}

static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    int ret, rc = 0;
    unsigned int xh_len;
    int xh_flags;

    if (!XBZRLE.decoded_buf) {
        XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
    }

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        fprintf(stderr, "Failed to load XBZRLE page - wrong compression!\n");
        return -1;
    }

    if (xh_len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "Failed to load XBZRLE page - len overflow!\n");
        return -1;
    }
    /* load data and decode */
    qemu_get_buffer(f, XBZRLE.decoded_buf, xh_len);

    /* decode RLE */
    ret = xbzrle_decode_buffer(XBZRLE.decoded_buf, xh_len, host,
                               TARGET_PAGE_SIZE);
    if (ret == -1) {
        fprintf(stderr, "Failed to load XBZRLE page - decode error!\n");
        rc = -1;
    } else if (ret > TARGET_PAGE_SIZE) {
        fprintf(stderr, "Failed to load XBZRLE page - size %d exceeds %d!\n",
                ret, TARGET_PAGE_SIZE);
        abort();
    }

    return rc;
}

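/*
 * The layout consumed here mirrors what save_xbzrle_page() wrote: one
 * ENCODING_FLAG_XBZRLE byte, a big-endian 16-bit encoded length, then
 * the encoded buffer, which is decoded on top of the previous page
 * content already sitting at 'host'.
 */
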
static inline void *host_from_stream_offset(QEMUFile *f,
                                            ram_addr_t offset,
                                            int flags)
{
    static RAMBlock *block = NULL;
    char id[256];
    uint8_t len;

    if (flags & RAM_SAVE_FLAG_CONTINUE) {
        if (!block) {
            fprintf(stderr, "Ack, bad migration stream!\n");
            return NULL;
        }

        return memory_region_get_ram_ptr(block->mr) + offset;
    }

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);
    id[len] = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (!strncmp(id, block->idstr, sizeof(id)))
            return memory_region_get_ram_ptr(block->mr) + offset;
    }

    fprintf(stderr, "Can't find block %s!\n", id);
    return NULL;
}

/*
 * If a page (or a whole RDMA chunk) has been
 * determined to be zero, then zap it.
 */
void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
{
    if (ch != 0 || !is_zero_range(host, size)) {
        memset(host, ch, size);
    }
}

static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    ram_addr_t addr;
    int flags, ret = 0;
    int error;
    static uint64_t seq_iter;

    seq_iter++;

    if (version_id != 4) {
        return -EINVAL;
    }

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & RAM_SAVE_FLAG_MEM_SIZE) {
            /* Synchronize RAM block list */
            char id[256];
            ram_addr_t length;
            ram_addr_t total_ram_bytes = addr;

            while (total_ram_bytes) {
                RAMBlock *block;
                uint8_t len;

                len = qemu_get_byte(f);
                qemu_get_buffer(f, (uint8_t *)id, len);
                id[len] = 0;
                length = qemu_get_be64(f);

                QTAILQ_FOREACH(block, &ram_list.blocks, next) {
                    if (!strncmp(id, block->idstr, sizeof(id))) {
                        if (block->length != length) {
                            fprintf(stderr,
                                    "Length mismatch: %s: " RAM_ADDR_FMT
                                    " in != " RAM_ADDR_FMT "\n", id, length,
                                    block->length);
                            ret = -EINVAL;
                            goto done;
                        }
                        break;
                    }
                }

                if (!block) {
                    fprintf(stderr, "Unknown ramblock \"%s\", cannot "
                            "accept migration\n", id);
                    ret = -EINVAL;
                    goto done;
                }

                total_ram_bytes -= length;
            }
        }

        if (flags & RAM_SAVE_FLAG_COMPRESS) {
            void *host;
            uint8_t ch;

            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            ch = qemu_get_byte(f);
            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
        } else if (flags & RAM_SAVE_FLAG_PAGE) {
            void *host;

            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
        } else if (flags & RAM_SAVE_FLAG_XBZRLE) {
            void *host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            if (load_xbzrle(f, addr, host) < 0) {
                ret = -EINVAL;
                goto done;
            }
        } else if (flags & RAM_SAVE_FLAG_HOOK) {
            ram_control_load_hook(f, flags);
        }
        error = qemu_file_get_error(f);
        if (error) {
            ret = error;
            goto done;
        }
    } while (!(flags & RAM_SAVE_FLAG_EOS));

done:
    DPRINTF("Completed load of VM with exit code %d seq iteration "
            "%" PRIu64 "\n", ret, seq_iter);
    return ret;
}

SaveVMHandlers savevm_ram_handlers = {
    .save_live_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete = ram_save_complete,
    .save_live_pending = ram_save_pending,
    .load_state = ram_load,
    .cancel = ram_migration_cancel,
};

struct soundhw {
    const char *name;
    const char *descr;
    int enabled;
    int isa;
    union {
        int (*init_isa) (ISABus *bus);
        int (*init_pci) (PCIBus *bus);
    } init;
};

static struct soundhw soundhw[9];
static int soundhw_count;

void isa_register_soundhw(const char *name, const char *descr,
                          int (*init_isa)(ISABus *bus))
{
    assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
    soundhw[soundhw_count].name = name;
    soundhw[soundhw_count].descr = descr;
    soundhw[soundhw_count].isa = 1;
    soundhw[soundhw_count].init.init_isa = init_isa;
    soundhw_count++;
}

void pci_register_soundhw(const char *name, const char *descr,
                          int (*init_pci)(PCIBus *bus))
{
    assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
    soundhw[soundhw_count].name = name;
    soundhw[soundhw_count].descr = descr;
    soundhw[soundhw_count].isa = 0;
    soundhw[soundhw_count].init.init_pci = init_pci;
    soundhw_count++;
}

void select_soundhw(const char *optarg)
{
    struct soundhw *c;

    if (is_help_option(optarg)) {
    show_valid_cards:

        if (soundhw_count) {
            printf("Valid sound card names (comma separated):\n");
            for (c = soundhw; c->name; ++c) {
                printf("%-11s %s\n", c->name, c->descr);
            }
            printf("\n-soundhw all will enable all of the above\n");
        } else {
            printf("Machine has no user-selectable audio hardware "
                   "(it may or may not have always-present audio hardware).\n");
        }
        exit(!is_help_option(optarg));
    } else {
        size_t l;
        const char *p;
        char *e;
        int bad_card = 0;

        if (!strcmp(optarg, "all")) {
            for (c = soundhw; c->name; ++c) {
                c->enabled = 1;
            }
            return;
        }

        p = optarg;
        while (*p) {
            e = strchr(p, ',');
            l = !e ? strlen(p) : (size_t) (e - p);

            for (c = soundhw; c->name; ++c) {
                if (!strncmp(c->name, p, l) && !c->name[l]) {
                    c->enabled = 1;
                    break;
                }
            }

            if (!c->name) {
                if (l > 80) {
                    fprintf(stderr,
                            "Unknown sound card name (too big to show)\n");
                } else {
                    fprintf(stderr, "Unknown sound card name `%.*s'\n",
                            (int) l, p);
                }
                bad_card = 1;
            }
            p += l + (e != NULL);
        }

        if (bad_card) {
            goto show_valid_cards;
        }
    }
}

void audio_init(void)
{
    struct soundhw *c;
    ISABus *isa_bus = (ISABus *) object_resolve_path_type("", TYPE_ISA_BUS, NULL);
    PCIBus *pci_bus = (PCIBus *) object_resolve_path_type("", TYPE_PCI_BUS, NULL);

    for (c = soundhw; c->name; ++c) {
        if (c->enabled) {
            if (c->isa) {
                if (!isa_bus) {
                    fprintf(stderr, "ISA bus not available for %s\n", c->name);
                    exit(1);
                }
                c->init.init_isa(isa_bus);
            } else {
                if (!pci_bus) {
                    fprintf(stderr, "PCI bus not available for %s\n", c->name);
                    exit(1);
                }
                c->init.init_pci(pci_bus);
            }
        }
    }
}

int qemu_uuid_parse(const char *str, uint8_t *uuid)
{
    int ret;

    if (strlen(str) != 36) {
        return -1;
    }

    ret = sscanf(str, UUID_FMT, &uuid[0], &uuid[1], &uuid[2], &uuid[3],
                 &uuid[4], &uuid[5], &uuid[6], &uuid[7], &uuid[8], &uuid[9],
                 &uuid[10], &uuid[11], &uuid[12], &uuid[13], &uuid[14],
                 &uuid[15]);

    if (ret != 16) {
        return -1;
    }
    return 0;
}

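/*
 * For example, qemu_uuid_parse("550e8400-e29b-41d4-a716-446655440000",
 * uuid) scans the canonical 8-4-4-4-12 form via UUID_FMT, fills all 16
 * bytes of 'uuid', and returns 0; any other string length, or a partial
 * sscanf() match, returns -1.
 */
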
void do_acpitable_option(const QemuOpts *opts)
{
#ifdef TARGET_I386
    Error *err = NULL;

    acpi_table_add(opts, &err);
    if (err) {
        error_report("Wrong acpi table provided: %s",
                     error_get_pretty(err));
        error_free(err);
        exit(1);
    }
#endif
}

void do_smbios_option(QemuOpts *opts)
{
#ifdef TARGET_I386
    smbios_entry_add(opts);
#endif
}

void cpudef_init(void)
{
#if defined(cpudef_setup)
    cpudef_setup(); /* parse cpu definitions in target config file */
#endif
}

int tcg_available(void)
{
    return 1;
}

int kvm_available(void)
{
#ifdef CONFIG_KVM
    return 1;
#else
    return 0;
#endif
}

int xen_available(void)
{
#ifdef CONFIG_XEN
    return 1;
#else
    return 0;
#endif
}

TargetInfo *qmp_query_target(Error **errp)
{
    TargetInfo *info = g_malloc0(sizeof(*info));

    info->arch = g_strdup(TARGET_NAME);

    return info;
}

/* Stub function that gets run on the vcpu when it's brought out of the
   VM to run inside qemu via async_run_on_cpu() */
static void mig_sleep_cpu(void *opq)
{
    qemu_mutex_unlock_iothread();
    g_usleep(30*1000);
    qemu_mutex_lock_iothread();
}

/* To reduce the dirty rate explicitly disallow the VCPUs from spending
   much time in the VM. The migration thread will try to catch up.
   Workload will experience a performance drop.
*/
static void mig_throttle_guest_down(void)
{
    CPUState *cpu;

    qemu_mutex_lock_iothread();
    CPU_FOREACH(cpu) {
        async_run_on_cpu(cpu, mig_sleep_cpu, NULL);
    }
    qemu_mutex_unlock_iothread();
}

static void check_guest_throttling(void)
{
    static int64_t t0;
    int64_t t1;

    if (!mig_throttle_on) {
        return;
    }

    if (!t0) {
        t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        return;
    }

    t1 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    /* If it has been more than 40 ms since the last time the guest
     * was throttled then do it again.
     */
    if (40 < (t1 - t0) / 1000000) {
        mig_throttle_guest_down();
        t0 = t1;
    }
}