[qemu-kvm.git] / arch_init.c
/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <stdint.h>
#include <stdarg.h>
#include <stdlib.h>
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include "config.h"
#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "sysemu/arch_init.h"
#include "audio/audio.h"
#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/audio/audio.h"
#include "sysemu/kvm.h"
#include "migration/migration.h"
#include "hw/i386/smbios.h"
#include "exec/address-spaces.h"
#include "hw/audio/pcspk.h"
#include "migration/page_cache.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qmp-commands.h"
#include "trace.h"
#include "exec/cpu-all.h"
#include "exec/ram_addr.h"
#include "hw/acpi/acpi.h"
#include "qemu/host-utils.h"
#ifdef DEBUG_ARCH_INIT
#define DPRINTF(fmt, ...) \
    do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
#ifdef TARGET_SPARC
int graphic_width = 1024;
int graphic_height = 768;
int graphic_depth = 8;
#else
int graphic_width = 800;
int graphic_height = 600;
int graphic_depth = 32;
#endif
#if defined(TARGET_ALPHA)
#define QEMU_ARCH QEMU_ARCH_ALPHA
#elif defined(TARGET_ARM)
#define QEMU_ARCH QEMU_ARCH_ARM
#elif defined(TARGET_CRIS)
#define QEMU_ARCH QEMU_ARCH_CRIS
#elif defined(TARGET_I386)
#define QEMU_ARCH QEMU_ARCH_I386
#elif defined(TARGET_M68K)
#define QEMU_ARCH QEMU_ARCH_M68K
#elif defined(TARGET_LM32)
#define QEMU_ARCH QEMU_ARCH_LM32
#elif defined(TARGET_MICROBLAZE)
#define QEMU_ARCH QEMU_ARCH_MICROBLAZE
#elif defined(TARGET_MIPS)
#define QEMU_ARCH QEMU_ARCH_MIPS
#elif defined(TARGET_MOXIE)
#define QEMU_ARCH QEMU_ARCH_MOXIE
#elif defined(TARGET_OPENRISC)
#define QEMU_ARCH QEMU_ARCH_OPENRISC
#elif defined(TARGET_PPC)
#define QEMU_ARCH QEMU_ARCH_PPC
#elif defined(TARGET_S390X)
#define QEMU_ARCH QEMU_ARCH_S390X
#elif defined(TARGET_SH4)
#define QEMU_ARCH QEMU_ARCH_SH4
#elif defined(TARGET_SPARC)
#define QEMU_ARCH QEMU_ARCH_SPARC
#elif defined(TARGET_XTENSA)
#define QEMU_ARCH QEMU_ARCH_XTENSA
#elif defined(TARGET_UNICORE32)
#define QEMU_ARCH QEMU_ARCH_UNICORE32
#elif defined(TARGET_TRICORE)
#define QEMU_ARCH QEMU_ARCH_TRICORE
#endif
const uint32_t arch_type = QEMU_ARCH;
static bool mig_throttle_on;
static int dirty_rate_high_cnt;
static void check_guest_throttling(void);

static uint64_t bitmap_sync_count;
/***********************************************************/
/* ram save/restore */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_COMPRESS 0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in migration.h; start with 0x100 next */
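/*
 * Note (added for exposition, not in the original file): each page record
 * on the wire begins with a 64-bit big-endian word that packs the
 * page-aligned offset together with the flags above, as save_block_hdr()
 * below does with qemu_put_be64(f, offset | cont | flag).  The loader in
 * ram_load() splits them apart again:
 *
 *     flags = addr & ~TARGET_PAGE_MASK;
 *     addr &= TARGET_PAGE_MASK;
 *
 * which is why all flag values must fit in the low bits below
 * TARGET_PAGE_SIZE.
 */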
static struct defconfig_file {
    const char *filename;
    /* Indicates it is a user config file (disabled by -no-user-config) */
    bool userconfig;
} default_config_files[] = {
    { CONFIG_QEMU_CONFDIR "/qemu.conf",                   true },
    { CONFIG_QEMU_CONFDIR "/target-" TARGET_NAME ".conf", true },
    { NULL }, /* end of list */
};

static const uint8_t ZERO_TARGET_PAGE[TARGET_PAGE_SIZE];
int qemu_read_default_config_files(bool userconfig)
{
    int ret;
    struct defconfig_file *f;

    for (f = default_config_files; f->filename; f++) {
        if (!userconfig && f->userconfig) {
            continue;
        }
        ret = qemu_read_config_file(f->filename);
        if (ret < 0 && ret != -ENOENT) {
            return ret;
        }
    }

    return 0;
}
static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
    return buffer_find_nonzero_offset(p, size) == size;
}
/* struct contains XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE, Protected by lock. */
    PageCache *cache;
    QemuMutex lock;
} XBZRLE;

/* buffer used for XBZRLE decoding */
static uint8_t *xbzrle_decoded_buf;
static void XBZRLE_cache_lock(void)
{
    if (migrate_use_xbzrle()) {
        qemu_mutex_lock(&XBZRLE.lock);
    }
}

static void XBZRLE_cache_unlock(void)
{
    if (migrate_use_xbzrle()) {
        qemu_mutex_unlock(&XBZRLE.lock);
    }
}
/*
 * called from qmp_migrate_set_cache_size in main thread, possibly while
 * a migration is in progress.
 * A running migration may be using the cache and might finish during this
 * call, hence changes to the cache are protected by XBZRLE.lock().
 */
int64_t xbzrle_cache_resize(int64_t new_size)
{
    PageCache *new_cache;
    int64_t ret;

    if (new_size < TARGET_PAGE_SIZE) {
        return -1;
    }

    XBZRLE_cache_lock();

    if (XBZRLE.cache != NULL) {
        if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
            goto out_new_size;
        }
        new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
                               TARGET_PAGE_SIZE);
        if (!new_cache) {
            error_report("Error creating cache");
            ret = -1;
            goto out;
        }

        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;
    }

out_new_size:
    ret = pow2floor(new_size);
out:
    XBZRLE_cache_unlock();
    return ret;
}
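/*
 * Usage note (added for exposition): the QMP command
 * migrate-set-cache-size reaches this function via
 * qmp_migrate_set_cache_size().  The size actually used is
 * pow2floor(new_size), i.e. the requested size rounded down to a power
 * of two, and that rounded value is what is returned on success.
 */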
/* accounting for migration statistics */
typedef struct AccountingInfo {
    uint64_t dup_pages;
    uint64_t skipped_pages;
    uint64_t norm_pages;
    uint64_t iterations;
    uint64_t xbzrle_bytes;
    uint64_t xbzrle_pages;
    uint64_t xbzrle_cache_miss;
    double xbzrle_cache_miss_rate;
    uint64_t xbzrle_overflows;
} AccountingInfo;

static AccountingInfo acct_info;

static void acct_clear(void)
{
    memset(&acct_info, 0, sizeof(acct_info));
}

uint64_t dup_mig_bytes_transferred(void)
{
    return acct_info.dup_pages * TARGET_PAGE_SIZE;
}

uint64_t dup_mig_pages_transferred(void)
{
    return acct_info.dup_pages;
}

uint64_t skipped_mig_bytes_transferred(void)
{
    return acct_info.skipped_pages * TARGET_PAGE_SIZE;
}

uint64_t skipped_mig_pages_transferred(void)
{
    return acct_info.skipped_pages;
}

uint64_t norm_mig_bytes_transferred(void)
{
    return acct_info.norm_pages * TARGET_PAGE_SIZE;
}

uint64_t norm_mig_pages_transferred(void)
{
    return acct_info.norm_pages;
}

uint64_t xbzrle_mig_bytes_transferred(void)
{
    return acct_info.xbzrle_bytes;
}

uint64_t xbzrle_mig_pages_transferred(void)
{
    return acct_info.xbzrle_pages;
}

uint64_t xbzrle_mig_pages_cache_miss(void)
{
    return acct_info.xbzrle_cache_miss;
}

double xbzrle_mig_cache_miss_rate(void)
{
    return acct_info.xbzrle_cache_miss_rate;
}

uint64_t xbzrle_mig_pages_overflow(void)
{
    return acct_info.xbzrle_overflows;
}
static size_t save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                             int cont, int flag)
{
    size_t size;

    qemu_put_be64(f, offset | cont | flag);
    size = 8;

    if (!cont) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr,
                        strlen(block->idstr));
        size += 1 + strlen(block->idstr);
    }
    return size;
}
/* This is the last block that we have visited searching for dirty pages
 */
static RAMBlock *last_seen_block;
/* This is the last block from where we have sent data */
static RAMBlock *last_sent_block;
static ram_addr_t last_offset;
static unsigned long *migration_bitmap;
static uint64_t migration_dirty_pages;
static uint32_t last_version;
static bool ram_bulk_stage;
/* Update the xbzrle cache to reflect a page that's been sent as all 0.
 * The important thing is that a stale (not-yet-0'd) page be replaced
 * by the new data.
 * As a bonus, if the page wasn't in the cache it gets added so that
 * when a small write is made into the 0'd page it gets XBZRLE sent.
 */
static void xbzrle_cache_zero_page(ram_addr_t current_addr)
{
    if (ram_bulk_stage || !migrate_use_xbzrle()) {
        return;
    }

    /* We don't care if this fails to allocate a new cache page
     * as long as it updated an old one */
    cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE);
}
#define ENCODING_FLAG_XBZRLE 0x1

static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, int cont, bool last_stage)
{
    int encoded_len = 0, bytes_sent = -1;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr)) {
        acct_info.xbzrle_cache_miss++;
        if (!last_stage) {
            if (cache_insert(XBZRLE.cache, current_addr, *current_data) == -1) {
                return -1;
            } else {
                /* update *current_data when the page has been
                   inserted into cache */
                *current_data = get_cached_data(XBZRLE.cache, current_addr);
            }
        }
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        DPRINTF("Skipping unmodified page\n");
        return 0;
    } else if (encoded_len == -1) {
        DPRINTF("Overflow\n");
        acct_info.xbzrle_overflows++;
        /* update data in the cache */
        if (!last_stage) {
            memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
            *current_data = prev_cached_page;
        }
        return -1;
    }

    /* we need to update the data in the cache, in order to get the same data */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send XBZRLE based compressed page */
    bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(f, encoded_len);
    qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
    bytes_sent += encoded_len + 1 + 2;
    acct_info.xbzrle_pages++;
    acct_info.xbzrle_bytes += bytes_sent;

    return bytes_sent;
}
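/*
 * Wire format recap (added for exposition): an XBZRLE page record is the
 * block header from save_block_hdr() followed by one flag byte
 * (ENCODING_FLAG_XBZRLE), a big-endian 16-bit encoded length, and then
 * encoded_len bytes of delta-encoded data -- hence the
 * "encoded_len + 1 + 2" added to bytes_sent above.  load_xbzrle() below
 * consumes exactly this layout.
 */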
static inline
ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
                                                 ram_addr_t start)
{
    unsigned long base = mr->ram_addr >> TARGET_PAGE_BITS;
    unsigned long nr = base + (start >> TARGET_PAGE_BITS);
    uint64_t mr_size = TARGET_PAGE_ALIGN(memory_region_size(mr));
    unsigned long size = base + (mr_size >> TARGET_PAGE_BITS);

    unsigned long next;

    if (ram_bulk_stage && nr > base) {
        next = nr + 1;
    } else {
        next = find_next_bit(migration_bitmap, size, nr);
    }

    if (next < size) {
        clear_bit(next, migration_bitmap);
        migration_dirty_pages--;
    }
    return (next - base) << TARGET_PAGE_BITS;
}
static inline bool migration_bitmap_set_dirty(ram_addr_t addr)
{
    bool ret;
    int nr = addr >> TARGET_PAGE_BITS;

    ret = test_and_set_bit(nr, migration_bitmap);

    if (!ret) {
        migration_dirty_pages++;
    }
    return ret;
}
static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];

        for (k = page; k < page + nr; k++) {
            if (src[k]) {
                unsigned long new_dirty;
                new_dirty = ~migration_bitmap[k];
                migration_bitmap[k] |= src[k];
                new_dirty &= src[k];
                migration_dirty_pages += ctpopl(new_dirty);
                src[k] = 0;
            }
        }
    } else {
        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_get_dirty(start + addr,
                                              TARGET_PAGE_SIZE,
                                              DIRTY_MEMORY_MIGRATION)) {
                cpu_physical_memory_reset_dirty(start + addr,
                                                TARGET_PAGE_SIZE,
                                                DIRTY_MEMORY_MIGRATION);
                migration_bitmap_set_dirty(start + addr);
            }
        }
    }
}
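/*
 * Note on the fast path above (added for exposition): when the range is
 * word-aligned, whole bitmap words are merged at once.  new_dirty ends
 * up holding exactly the bits that were clear in migration_bitmap but
 * set in the source word, so ctpopl(new_dirty) counts only pages that
 * were not already pending, keeping migration_dirty_pages exact.
 */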
/* Needs iothread lock! */

static void migration_bitmap_sync(void)
{
    RAMBlock *block;
    uint64_t num_dirty_pages_init = migration_dirty_pages;
    MigrationState *s = migrate_get_current();
    static int64_t start_time;
    static int64_t bytes_xfer_prev;
    static int64_t num_dirty_pages_period;
    int64_t end_time;
    int64_t bytes_xfer_now;
    static uint64_t xbzrle_cache_miss_prev;
    static uint64_t iterations_prev;

    bitmap_sync_count++;

    if (!bytes_xfer_prev) {
        bytes_xfer_prev = ram_bytes_transferred();
    }

    if (!start_time) {
        start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    address_space_sync_dirty_bitmap(&address_space_memory);

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        migration_bitmap_sync_range(block->mr->ram_addr, block->length);
    }
    trace_migration_bitmap_sync_end(migration_dirty_pages
                                    - num_dirty_pages_init);
    num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > start_time + 1000) {
        if (migrate_auto_converge()) {
            /* The following detection logic can be refined later. For now:
               Check to see if the dirtied bytes is 50% more than the approx.
               amount of bytes that just got transferred since the last time we
               were in this routine. If that happens >N times (for now N==4)
               we turn on the throttle down logic */
            bytes_xfer_now = ram_bytes_transferred();
            if (s->dirty_pages_rate &&
                (num_dirty_pages_period * TARGET_PAGE_SIZE >
                    (bytes_xfer_now - bytes_xfer_prev) / 2) &&
                (dirty_rate_high_cnt++ > 4)) {
                trace_migration_throttle();
                mig_throttle_on = true;
                dirty_rate_high_cnt = 0;
            }
            bytes_xfer_prev = bytes_xfer_now;
        } else {
            mig_throttle_on = false;
        }
        if (migrate_use_xbzrle()) {
            if (iterations_prev != 0) {
                acct_info.xbzrle_cache_miss_rate =
                    (double)(acct_info.xbzrle_cache_miss -
                             xbzrle_cache_miss_prev) /
                    (acct_info.iterations - iterations_prev);
            }
            iterations_prev = acct_info.iterations;
            xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss;
        }
        s->dirty_pages_rate = num_dirty_pages_period * 1000
            / (end_time - start_time);
        s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
        start_time = end_time;
        num_dirty_pages_period = 0;
        s->dirty_sync_count = bitmap_sync_count;
    }
}
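/*
 * Heuristic recap (added for exposition): auto-converge re-evaluates at
 * most once per ~1 s window.  Per the condition above, the throttle is
 * armed only when the bytes dirtied in a window exceed half the bytes
 * transferred in it, and that has happened in more than four consecutive
 * windows (dirty_rate_high_cnt > 4).  check_guest_throttling() at the
 * bottom of this file then acts on mig_throttle_on.
 */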
/*
 * ram_save_page: Send the given page to the stream
 *
 * Returns: Number of bytes written.
 */
static int ram_save_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                         bool last_stage)
{
    int bytes_sent;
    int cont;
    ram_addr_t current_addr;
    MemoryRegion *mr = block->mr;
    uint8_t *p;
    int ret;
    bool send_async = true;

    cont = (block == last_sent_block) ? RAM_SAVE_FLAG_CONTINUE : 0;

    p = memory_region_get_ram_ptr(mr) + offset;

    /* When in doubt, send the page as a normal page */
    bytes_sent = -1;
    ret = ram_control_save_page(f, block->offset,
                                offset, TARGET_PAGE_SIZE, &bytes_sent);

    XBZRLE_cache_lock();

    current_addr = block->offset + offset;
    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
        if (ret != RAM_SAVE_CONTROL_DELAYED) {
            if (bytes_sent > 0) {
                acct_info.norm_pages++;
            } else if (bytes_sent == 0) {
                acct_info.dup_pages++;
            }
        }
    } else if (is_zero_range(p, TARGET_PAGE_SIZE)) {
        acct_info.dup_pages++;
        bytes_sent = save_block_hdr(f, block, offset, cont,
                                    RAM_SAVE_FLAG_COMPRESS);
        qemu_put_byte(f, 0);
        bytes_sent++;
        /* Must let xbzrle know, otherwise a previous (now 0'd) cached
         * page would be stale
         */
        xbzrle_cache_zero_page(current_addr);
    } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
        bytes_sent = save_xbzrle_page(f, &p, current_addr, block,
                                      offset, cont, last_stage);
        if (!last_stage) {
            /* Can't send this cached data async, since the cache page
             * might get updated before it gets to the wire
             */
            send_async = false;
        }
    }

    /* XBZRLE overflow or normal page */
    if (bytes_sent == -1) {
        bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE);
        if (send_async) {
            qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
        } else {
            qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
        }
        bytes_sent += TARGET_PAGE_SIZE;
        acct_info.norm_pages++;
    }

    XBZRLE_cache_unlock();

    return bytes_sent;
}
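/*
 * Decision order recap (added for exposition): ram_save_page() first
 * offers the page to a transport hook (e.g. RDMA) via
 * ram_control_save_page(); failing that it sends an all-zero page as a
 * one-byte RAM_SAVE_FLAG_COMPRESS record, then tries XBZRLE once the
 * bulk stage is over, and finally falls back to a full
 * RAM_SAVE_FLAG_PAGE copy of TARGET_PAGE_SIZE bytes.
 */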
/*
 * ram_find_and_save_block: Finds a page to send and sends it to f
 *
 * Returns: The number of bytes written.
 *          0 means no dirty pages
 */
static int ram_find_and_save_block(QEMUFile *f, bool last_stage)
{
    RAMBlock *block = last_seen_block;
    ram_addr_t offset = last_offset;
    bool complete_round = false;
    int bytes_sent = 0;
    MemoryRegion *mr;

    if (!block) {
        block = QTAILQ_FIRST(&ram_list.blocks);
    }

    while (true) {
        mr = block->mr;
        offset = migration_bitmap_find_and_reset_dirty(mr, offset);
        if (complete_round && block == last_seen_block &&
            offset >= last_offset) {
            break;
        }
        if (offset >= block->length) {
            offset = 0;
            block = QTAILQ_NEXT(block, next);
            if (!block) {
                block = QTAILQ_FIRST(&ram_list.blocks);
                complete_round = true;
                ram_bulk_stage = false;
            }
        } else {
            bytes_sent = ram_save_page(f, block, offset, last_stage);

            /* if page is unmodified, continue to the next */
            if (bytes_sent > 0) {
                last_sent_block = block;
                break;
            }
        }
    }
    last_seen_block = block;
    last_offset = offset;

    return bytes_sent;
}
static uint64_t bytes_transferred;

void acct_update_position(QEMUFile *f, size_t size, bool zero)
{
    uint64_t pages = size / TARGET_PAGE_SIZE;
    if (zero) {
        acct_info.dup_pages += pages;
    } else {
        acct_info.norm_pages += pages;
        bytes_transferred += size;
        qemu_update_position(f, size);
    }
}
static ram_addr_t ram_save_remaining(void)
{
    return migration_dirty_pages;
}

uint64_t ram_bytes_remaining(void)
{
    return ram_save_remaining() * TARGET_PAGE_SIZE;
}

uint64_t ram_bytes_transferred(void)
{
    return bytes_transferred;
}

uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        total += block->length;
    }

    return total;
}

void free_xbzrle_decoded_buf(void)
{
    g_free(xbzrle_decoded_buf);
    xbzrle_decoded_buf = NULL;
}
static void migration_end(void)
{
    if (migration_bitmap) {
        memory_global_dirty_log_stop();
        g_free(migration_bitmap);
        migration_bitmap = NULL;
    }

    XBZRLE_cache_lock();
    if (XBZRLE.cache) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        XBZRLE.cache = NULL;
        XBZRLE.encoded_buf = NULL;
        XBZRLE.current_buf = NULL;
    }
    XBZRLE_cache_unlock();
}

static void ram_migration_cancel(void *opaque)
{
    migration_end();
}
static void reset_ram_globals(void)
{
    last_seen_block = NULL;
    last_sent_block = NULL;
    last_offset = 0;
    last_version = ram_list.version;
    ram_bulk_stage = true;
}

#define MAX_WAIT 50 /* ms, half buffered_file limit */
static int ram_save_setup(QEMUFile *f, void *opaque)
{
    RAMBlock *block;
    int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */

    mig_throttle_on = false;
    dirty_rate_high_cnt = 0;
    bitmap_sync_count = 0;

    if (migrate_use_xbzrle()) {
        XBZRLE_cache_lock();
        XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
                                  TARGET_PAGE_SIZE,
                                  TARGET_PAGE_SIZE);
        if (!XBZRLE.cache) {
            XBZRLE_cache_unlock();
            error_report("Error creating cache");
            return -1;
        }
        XBZRLE_cache_unlock();

        /* We prefer not to abort if there is no memory */
        XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
        if (!XBZRLE.encoded_buf) {
            error_report("Error allocating encoded_buf");
            return -1;
        }

        XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
        if (!XBZRLE.current_buf) {
            error_report("Error allocating current_buf");
            g_free(XBZRLE.encoded_buf);
            XBZRLE.encoded_buf = NULL;
            return -1;
        }

        acct_clear();
    }

    qemu_mutex_lock_iothread();
    qemu_mutex_lock_ramlist();
    bytes_transferred = 0;
    reset_ram_globals();

    ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
    migration_bitmap = bitmap_new(ram_bitmap_pages);
    bitmap_set(migration_bitmap, 0, ram_bitmap_pages);

    /*
     * Count the total number of pages used by ram blocks not including any
     * gaps due to alignment or unplugs.
     */
    migration_dirty_pages = 0;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        uint64_t block_pages;

        block_pages = block->length >> TARGET_PAGE_BITS;
        migration_dirty_pages += block_pages;
    }

    memory_global_dirty_log_start();
    migration_bitmap_sync();
    qemu_mutex_unlock_iothread();

    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
        qemu_put_be64(f, block->length);
    }

    qemu_mutex_unlock_ramlist();

    ram_control_before_iterate(f, RAM_CONTROL_SETUP);
    ram_control_after_iterate(f, RAM_CONTROL_SETUP);

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}
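/*
 * Setup-stage stream layout (added for exposition): one be64 word
 * carrying the total RAM size OR'd with RAM_SAVE_FLAG_MEM_SIZE, then for
 * every RAMBlock a length-prefixed idstr followed by a be64 block
 * length, and finally a RAM_SAVE_FLAG_EOS word.  ram_load() checks this
 * list against the destination's RAM blocks before accepting any pages.
 */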
static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int i;
    int64_t t0;
    int total_sent = 0;

    qemu_mutex_lock_ramlist();

    if (ram_list.version != last_version) {
        reset_ram_globals();
    }

    ram_control_before_iterate(f, RAM_CONTROL_ROUND);

    t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    i = 0;
    while ((ret = qemu_file_rate_limit(f)) == 0) {
        int bytes_sent;

        bytes_sent = ram_find_and_save_block(f, false);
        /* no more blocks to send */
        if (bytes_sent == 0) {
            break;
        }
        total_sent += bytes_sent;
        acct_info.iterations++;
        check_guest_throttling();
        /* we want to check in the 1st loop, just in case it was the 1st time
           and we had to sync the dirty bitmap.
           qemu_clock_get_ns() is a bit expensive, so we only check every few
           iterations
        */
        if ((i & 63) == 0) {
            uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
            if (t1 > MAX_WAIT) {
                DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
                        t1, i);
                break;
            }
        }
        i++;
    }

    qemu_mutex_unlock_ramlist();

    /*
     * Must occur before EOS (or any QEMUFile operation)
     * because of RDMA protocol.
     */
    ram_control_after_iterate(f, RAM_CONTROL_ROUND);

    bytes_transferred += total_sent;

    /*
     * Do not count these 8 bytes into total_sent, so that we can
     * return 0 if no page had been dirtied.
     */
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    bytes_transferred += 8;

    ret = qemu_file_get_error(f);
    if (ret < 0) {
        return ret;
    }

    return total_sent;
}
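/*
 * Pacing note (added for exposition): the loop above runs until
 * qemu_file_rate_limit() reports that the bandwidth budget is spent, and
 * additionally bails out once a pass has taken more than MAX_WAIT ms.
 * The clock is only sampled every 64 iterations ((i & 63) == 0) because
 * reading it for every page would be too costly.
 */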
static int ram_save_complete(QEMUFile *f, void *opaque)
{
    qemu_mutex_lock_ramlist();
    migration_bitmap_sync();

    ram_control_before_iterate(f, RAM_CONTROL_FINISH);

    /* try transferring iterative blocks of memory */

    /* flush all remaining blocks regardless of rate limiting */
    while (true) {
        int bytes_sent;

        bytes_sent = ram_find_and_save_block(f, true);
        /* no more blocks to send */
        if (bytes_sent == 0) {
            break;
        }
        bytes_transferred += bytes_sent;
    }

    ram_control_after_iterate(f, RAM_CONTROL_FINISH);
    migration_end();

    qemu_mutex_unlock_ramlist();
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}
static uint64_t ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
{
    uint64_t remaining_size;

    remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;

    if (remaining_size < max_size) {
        qemu_mutex_lock_iothread();
        migration_bitmap_sync();
        qemu_mutex_unlock_iothread();
        remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
    }
    return remaining_size;
}
static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    unsigned int xh_len;
    int xh_flags;

    if (!xbzrle_decoded_buf) {
        xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
    }

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        error_report("Failed to load XBZRLE page - wrong compression!");
        return -1;
    }

    if (xh_len > TARGET_PAGE_SIZE) {
        error_report("Failed to load XBZRLE page - len overflow!");
        return -1;
    }
    /* load data and decode */
    qemu_get_buffer(f, xbzrle_decoded_buf, xh_len);

    /* decode RLE */
    if (xbzrle_decode_buffer(xbzrle_decoded_buf, xh_len, host,
                             TARGET_PAGE_SIZE) == -1) {
        error_report("Failed to load XBZRLE page - decode error!");
        return -1;
    }

    return 0;
}
static inline void *host_from_stream_offset(QEMUFile *f,
                                            ram_addr_t offset,
                                            int flags)
{
    static RAMBlock *block = NULL;
    char id[256];
    uint8_t len;

    if (flags & RAM_SAVE_FLAG_CONTINUE) {
        if (!block) {
            error_report("Ack, bad migration stream!");
            return NULL;
        }

        return memory_region_get_ram_ptr(block->mr) + offset;
    }

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);
    id[len] = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (!strncmp(id, block->idstr, sizeof(id))) {
            return memory_region_get_ram_ptr(block->mr) + offset;
        }
    }

    error_report("Can't find block %s!", id);
    return NULL;
}
/*
 * If a page (or a whole RDMA chunk) has been
 * determined to be zero, then zap it.
 */
void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
{
    if (ch != 0 || !is_zero_range(host, size)) {
        memset(host, ch, size);
    }
}
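/*
 * Design note (added for exposition; rationale inferred from the check
 * above): skipping the memset when the fill byte is zero and the
 * destination already reads as zero avoids writing to pages the host may
 * not have allocated yet, which matters for overcommitted guests.
 */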
static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    ram_addr_t addr;
    int flags, ret = 0;
    static uint64_t seq_iter;

    seq_iter++;

    if (version_id != 4) {
        ret = -EINVAL;
    }

    while (!ret) {
        addr = qemu_get_be64(f);

        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & RAM_SAVE_FLAG_MEM_SIZE) {
            /* Synchronize RAM block list */
            char id[256];
            ram_addr_t length;
            ram_addr_t total_ram_bytes = addr;

            while (total_ram_bytes) {
                RAMBlock *block;
                uint8_t len;

                len = qemu_get_byte(f);
                qemu_get_buffer(f, (uint8_t *)id, len);
                id[len] = 0;
                length = qemu_get_be64(f);

                QTAILQ_FOREACH(block, &ram_list.blocks, next) {
                    if (!strncmp(id, block->idstr, sizeof(id))) {
                        if (block->length != length) {
                            error_report("Length mismatch: %s: 0x" RAM_ADDR_FMT
                                         " in != 0x" RAM_ADDR_FMT, id, length,
                                         block->length);
                            ret = -EINVAL;
                        }
                        break;
                    }
                }

                if (!block) {
                    error_report("Unknown ramblock \"%s\", cannot "
                                 "accept migration", id);
                    ret = -EINVAL;
                }
                if (ret) {
                    break;
                }

                total_ram_bytes -= length;
            }
        } else if (flags & RAM_SAVE_FLAG_COMPRESS) {
            void *host;
            uint8_t ch;

            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }

            ch = qemu_get_byte(f);
            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
        } else if (flags & RAM_SAVE_FLAG_PAGE) {
            void *host;

            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }

            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
        } else if (flags & RAM_SAVE_FLAG_XBZRLE) {
            void *host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }

            if (load_xbzrle(f, addr, host) < 0) {
                error_report("Failed to decompress XBZRLE page at "
                             RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
        } else if (flags & RAM_SAVE_FLAG_HOOK) {
            ram_control_load_hook(f, flags);
        } else if (flags & RAM_SAVE_FLAG_EOS) {
            /* normal exit */
            break;
        } else {
            error_report("Unknown migration flags: %#x", flags);
            ret = -EINVAL;
            break;
        }
        ret = qemu_file_get_error(f);
    }

    DPRINTF("Completed load of VM with exit code %d seq iteration "
            "%" PRIu64 "\n", ret, seq_iter);
    return ret;
}
static SaveVMHandlers savevm_ram_handlers = {
    .save_live_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete = ram_save_complete,
    .save_live_pending = ram_save_pending,
    .load_state = ram_load,
    .cancel = ram_migration_cancel,
};

void ram_mig_init(void)
{
    qemu_mutex_init(&XBZRLE.lock);
    register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL);
}
struct soundhw {
    const char *name;
    const char *descr;
    int enabled;
    int isa;
    union {
        int (*init_isa) (ISABus *bus);
        int (*init_pci) (PCIBus *bus);
    } init;
};

static struct soundhw soundhw[9];
static int soundhw_count;

void isa_register_soundhw(const char *name, const char *descr,
                          int (*init_isa)(ISABus *bus))
{
    assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
    soundhw[soundhw_count].name = name;
    soundhw[soundhw_count].descr = descr;
    soundhw[soundhw_count].isa = 1;
    soundhw[soundhw_count].init.init_isa = init_isa;
    soundhw_count++;
}

void pci_register_soundhw(const char *name, const char *descr,
                          int (*init_pci)(PCIBus *bus))
{
    assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
    soundhw[soundhw_count].name = name;
    soundhw[soundhw_count].descr = descr;
    soundhw[soundhw_count].isa = 0;
    soundhw[soundhw_count].init.init_pci = init_pci;
    soundhw_count++;
}
void select_soundhw(const char *optarg)
{
    struct soundhw *c;

    if (is_help_option(optarg)) {
    show_valid_cards:

        if (soundhw_count) {
            printf("Valid sound card names (comma separated):\n");
            for (c = soundhw; c->name; ++c) {
                printf("%-11s %s\n", c->name, c->descr);
            }
            printf("\n-soundhw all will enable all of the above\n");
        } else {
            printf("Machine has no user-selectable audio hardware "
                   "(it may or may not have always-present audio hardware).\n");
        }
        exit(!is_help_option(optarg));
    } else {
        size_t l;
        const char *p;
        char *e;
        int bad_card = 0;

        if (!strcmp(optarg, "all")) {
            for (c = soundhw; c->name; ++c) {
                c->enabled = 1;
            }
            return;
        }

        p = optarg;
        while (*p) {
            e = strchr(p, ',');
            l = !e ? strlen(p) : (size_t) (e - p);

            for (c = soundhw; c->name; ++c) {
                if (!strncmp(c->name, p, l) && !c->name[l]) {
                    c->enabled = 1;
                    break;
                }
            }

            if (!c->name) {
                if (l > 80) {
                    error_report("Unknown sound card name (too big to show)");
                } else {
                    error_report("Unknown sound card name `%.*s'",
                                 (int) l, p);
                }
                bad_card = 1;
            }
            p += l + (e != NULL);
        }

        if (bad_card) {
            goto show_valid_cards;
        }
    }
}
void audio_init(void)
{
    struct soundhw *c;
    ISABus *isa_bus = (ISABus *) object_resolve_path_type("", TYPE_ISA_BUS, NULL);
    PCIBus *pci_bus = (PCIBus *) object_resolve_path_type("", TYPE_PCI_BUS, NULL);

    for (c = soundhw; c->name; ++c) {
        if (c->enabled) {
            if (c->isa) {
                if (!isa_bus) {
                    error_report("ISA bus not available for %s", c->name);
                    exit(1);
                }
                c->init.init_isa(isa_bus);
            } else {
                if (!pci_bus) {
                    error_report("PCI bus not available for %s", c->name);
                    exit(1);
                }
                c->init.init_pci(pci_bus);
            }
        }
    }
}
int qemu_uuid_parse(const char *str, uint8_t *uuid)
{
    int ret;

    if (strlen(str) != 36) {
        return -1;
    }

    ret = sscanf(str, UUID_FMT, &uuid[0], &uuid[1], &uuid[2], &uuid[3],
                 &uuid[4], &uuid[5], &uuid[6], &uuid[7], &uuid[8], &uuid[9],
                 &uuid[10], &uuid[11], &uuid[12], &uuid[13], &uuid[14],
                 &uuid[15]);

    if (ret != 16) {
        return -1;
    }
    return 0;
}
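/*
 * Usage sketch (added for exposition; assumes UUID_FMT is the usual
 * 8-4-4-4-12 hex scanf layout from the sysemu headers):
 *
 *     uint8_t uuid[16];
 *     if (qemu_uuid_parse("550e8400-e29b-41d4-a716-446655440000", uuid) < 0) {
 *         error_report("bad UUID");
 *     }
 *
 * Anything that is not exactly 36 characters, or that sscanf() cannot
 * split into 16 hex bytes, is rejected with -1.
 */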
void do_acpitable_option(const QemuOpts *opts)
{
#ifdef TARGET_I386
    Error *err = NULL;

    acpi_table_add(opts, &err);
    if (err) {
        error_report("Wrong acpi table provided: %s",
                     error_get_pretty(err));
        error_free(err);
        exit(1);
    }
#endif
}

void do_smbios_option(QemuOpts *opts)
{
#ifdef TARGET_I386
    smbios_entry_add(opts);
#endif
}
void cpudef_init(void)
{
#if defined(cpudef_setup)
    cpudef_setup(); /* parse cpu definitions in target config file */
#endif
}

int kvm_available(void)
{
#ifdef CONFIG_KVM
    return 1;
#else
    return 0;
#endif
}

int xen_available(void)
{
#ifdef CONFIG_XEN
    return 1;
#else
    return 0;
#endif
}
TargetInfo *qmp_query_target(Error **errp)
{
    TargetInfo *info = g_malloc0(sizeof(*info));

    info->arch = g_strdup(TARGET_NAME);

    return info;
}
/* Stub function that gets run on the vcpu when it's brought out of the
   VM to run inside qemu via async_run_on_cpu() */
static void mig_sleep_cpu(void *opq)
{
    qemu_mutex_unlock_iothread();
    g_usleep(30*1000);
    qemu_mutex_lock_iothread();
}

/* To reduce the dirty rate explicitly disallow the VCPUs from spending
   much time in the VM. The migration thread will try to catch up.
   Workload will experience a performance drop.
*/
static void mig_throttle_guest_down(void)
{
    CPUState *cpu;

    qemu_mutex_lock_iothread();
    CPU_FOREACH(cpu) {
        async_run_on_cpu(cpu, mig_sleep_cpu, NULL);
    }
    qemu_mutex_unlock_iothread();
}
static void check_guest_throttling(void)
{
    static int64_t t0;
    int64_t t1;

    if (!mig_throttle_on) {
        return;
    }

    if (!t0) {
        t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        return;
    }

    t1 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    /* If it has been more than 40 ms since the last time the guest
     * was throttled then do it again.
     */
    if (40 < (t1-t0)/1000000) {
        mig_throttle_guest_down();
        t0 = t1;
    }
}
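/*
 * Timing recap (added for exposition): once mig_throttle_on is set by
 * migration_bitmap_sync(), every 40 ms of wall-clock time each vCPU is
 * queued a mig_sleep_cpu() job that sleeps for 30 ms outside the
 * iothread lock, slowing the guest so the migration thread can catch up.
 */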