/*
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include <sys/types.h>
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu-barrier.h"

/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
/* KVM uses PAGE_SIZE in its definition of COALESCED_MMIO_MAX */
#define PAGE_SIZE TARGET_PAGE_SIZE
#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
typedef struct KVMSlot
{
    target_phys_addr_t start_addr;
    ram_addr_t memory_size;
    ram_addr_t phys_offset;
    int slot;
    int flags;
} KVMSlot;

typedef struct kvm_dirty_log KVMDirtyLog;
struct KVMState
{
    KVMSlot slots[32];
    int fd;
    int vmfd;
    int coalesced_mmio;
    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
    int broken_set_mem_region;
    int migration_log;
    int vcpu_events;
    int robust_singlestep;
    int debugregs;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
#endif
    int irqchip_in_kernel;
    int pit_in_kernel;
    int xsave, xcrs;
    int many_ioeventfds;
};

static KVMState *kvm_state;
static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_LAST_INFO
};
static KVMSlot *kvm_alloc_slot(KVMState *s)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        if (s->slots[i].memory_size == 0) {
            return &s->slots[i];
        }
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}
static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
                                         target_phys_addr_t start_addr,
                                         target_phys_addr_t end_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (start_addr == mem->start_addr &&
            end_addr == mem->start_addr + mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}
/*
 * Find overlapping slot with lowest start address
 */
static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
                                            target_phys_addr_t start_addr,
                                            target_phys_addr_t end_addr)
{
    KVMSlot *found = NULL;
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (mem->memory_size == 0 ||
            (found && found->start_addr < mem->start_addr)) {
            continue;
        }

        if (end_addr > mem->start_addr &&
            start_addr < mem->start_addr + mem->memory_size) {
            found = mem;
        }
    }

    return found;
}
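
/*
 * Example (hypothetical addresses): for a slot covering [0x8000, 0xc000) and
 * a request for [0xa000, 0x10000), the half-open interval test above holds
 * (0x10000 > 0x8000 and 0xa000 < 0xc000), so the slot is reported as
 * overlapping. Slots starting above an already found one are skipped, which
 * is what makes the result the overlapping slot with the lowest start
 * address.
 */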
int kvm_physical_memory_addr_from_ram(KVMState *s, ram_addr_t ram_addr,
                                      target_phys_addr_t *phys_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (ram_addr >= mem->phys_offset &&
            ram_addr < mem->phys_offset + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram_addr - mem->phys_offset);
            return 1;
        }
    }

    return 0;
}
static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
{
    struct kvm_userspace_memory_region mem;

    mem.slot = slot->slot;
    mem.guest_phys_addr = slot->start_addr;
    mem.memory_size = slot->memory_size;
    mem.userspace_addr = (unsigned long)qemu_safe_ram_ptr(slot->phys_offset);
    mem.flags = slot->flags;
    if (s->migration_log) {
        mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}
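
/*
 * Usage sketch (hypothetical values): registering a 128 KiB RAM slot at
 * guest physical address 0x100000 amounts to
 *
 *     KVMSlot slot = {
 *         .slot        = 3,
 *         .start_addr  = 0x100000,
 *         .memory_size = 128 * 1024,
 *         .phys_offset = ram_offset,  // offset into qemu's RAM blocks
 *         .flags       = 0,
 *     };
 *     kvm_set_user_memory_region(s, &slot);
 *
 * i.e. a single KVM_SET_USER_MEMORY_REGION ioctl that maps the guest
 * physical range onto the host virtual address backing the RAM block.
 */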
static void kvm_reset_vcpu(void *opaque)
{
    CPUState *env = opaque;

    kvm_arch_reset_vcpu(env);
}
int kvm_irqchip_in_kernel(void)
{
    return kvm_state->irqchip_in_kernel;
}

int kvm_pit_in_kernel(void)
{
    return kvm_state->pit_in_kernel;
}
int kvm_init_vcpu(CPUState *env)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    DPRINTF("kvm_init_vcpu\n");

    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, env->cpu_index);
    if (ret < 0) {
        DPRINTF("kvm_create_vcpu failed\n");
        goto err;
    }

    env->kvm_fd = ret;
    env->kvm_state = s;
    env->kvm_vcpu_dirty = 1;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    env->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        env->kvm_fd, 0);
    if (env->kvm_run == MAP_FAILED) {
        ret = -errno;
        DPRINTF("mmap'ing vcpu state failed\n");
        goto err;
    }

    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)env->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    ret = kvm_arch_init_vcpu(env);
    if (ret == 0) {
        qemu_register_reset(kvm_reset_vcpu, env);
        kvm_arch_reset_vcpu(env);
    }
err:
    return ret;
}
/*
 * dirty pages logging control
 */
static int kvm_mem_flags(KVMState *s, bool log_dirty)
{
    return log_dirty ? KVM_MEM_LOG_DIRTY_PAGES : 0;
}
static int kvm_slot_dirty_pages_log_change(KVMSlot *mem, bool log_dirty)
{
    KVMState *s = kvm_state;
    int flags, mask = KVM_MEM_LOG_DIRTY_PAGES;
    int old_flags;

    old_flags = mem->flags;

    flags = (mem->flags & ~mask) | kvm_mem_flags(s, log_dirty);
    mem->flags = flags;

    /* If nothing changed effectively, no need to issue ioctl */
    if (s->migration_log) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    if (flags == old_flags) {
        return 0;
    }

    return kvm_set_user_memory_region(s, mem);
}
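
/*
 * Example: with migration logging off, enabling dirty logging on a slot
 * whose flags are 0 yields flags == KVM_MEM_LOG_DIRTY_PAGES != old_flags,
 * so the slot is re-registered; requesting the state the slot is already
 * in leaves flags == old_flags and skips the ioctl.
 */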
static int kvm_dirty_pages_log_change(target_phys_addr_t phys_addr,
                                      ram_addr_t size, bool log_dirty)
{
    KVMState *s = kvm_state;
    KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);

    if (mem == NULL) {
        fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
                TARGET_FMT_plx "\n", __func__, phys_addr,
                (target_phys_addr_t)(phys_addr + size - 1));
        return -EINVAL;
    }
    return kvm_slot_dirty_pages_log_change(mem, log_dirty);
}
static int kvm_log_start(CPUPhysMemoryClient *client,
                         target_phys_addr_t phys_addr, ram_addr_t size)
{
    return kvm_dirty_pages_log_change(phys_addr, size, true);
}

static int kvm_log_stop(CPUPhysMemoryClient *client,
                        target_phys_addr_t phys_addr, ram_addr_t size)
{
    return kvm_dirty_pages_log_change(phys_addr, size, false);
}
static int kvm_set_migration_log(int enable)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    int i, err;

    s->migration_log = enable;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        mem = &s->slots[i];

        if (!mem->memory_size) {
            continue;
        }
        if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
            continue;
        }
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            return err;
        }
    }
    return 0;
}
/* get kvm's dirty pages bitmap and update qemu's */
static int kvm_get_dirty_pages_log_range(unsigned long start_addr,
                                         unsigned long *bitmap,
                                         unsigned long offset,
                                         unsigned long mem_size)
{
    unsigned int i, j;
    unsigned long page_number, addr, addr1, c;
    ram_addr_t ram_addr;
    unsigned int len = ((mem_size / TARGET_PAGE_SIZE) + HOST_LONG_BITS - 1) /
        HOST_LONG_BITS;

    /*
     * bitmap-traveling is faster than memory-traveling (for addr...)
     * especially when most of the memory is not dirty.
     */
    for (i = 0; i < len; i++) {
        if (bitmap[i] != 0) {
            c = leul_to_cpu(bitmap[i]);
            do {
                j = ffsl(c) - 1;
                c &= ~(1ul << j);
                page_number = i * HOST_LONG_BITS + j;
                addr1 = page_number * TARGET_PAGE_SIZE;
                addr = offset + addr1;
                ram_addr = cpu_get_physical_page_desc(addr);
                cpu_physical_memory_set_dirty(ram_addr);
            } while (c != 0);
        }
    }
    return 0;
}
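
/*
 * Worked example of the bitmap-traveling above (hypothetical numbers): with
 * HOST_LONG_BITS == 64 and TARGET_PAGE_SIZE == 4096, bitmap[2] == 0x5 has
 * bits 0 and 2 set, i.e. page_number 128 and 130, so the pages at
 * offset + 0x80000 and offset + 0x82000 are marked dirty. Zero words are
 * skipped wholesale, which is the win over visiting every page.
 */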
#define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))
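/*
 * ALIGN rounds x up to the next multiple of y, where y is a power of two:
 * ALIGN(13, 8) == 16, ALIGN(16, 8) == 16, ALIGN(250, 64) == 256.
 */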
/**
 * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
 * This function updates qemu's dirty bitmap using
 * cpu_physical_memory_set_dirty(). This means all bits are set to dirty.
 *
 * @start_addr: start of logged region.
 * @end_addr: end of logged region.
 */
static int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                          target_phys_addr_t end_addr)
{
    KVMState *s = kvm_state;
    unsigned long size, allocated_size = 0;
    KVMDirtyLog d;
    KVMSlot *mem;
    int ret = 0;

    d.dirty_bitmap = NULL;
    while (start_addr < end_addr) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
        if (mem == NULL) {
            break;
        }

        size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS), HOST_LONG_BITS) / 8;
        if (!d.dirty_bitmap) {
            d.dirty_bitmap = qemu_malloc(size);
        } else if (size > allocated_size) {
            d.dirty_bitmap = qemu_realloc(d.dirty_bitmap, size);
        }
        allocated_size = size;
        memset(d.dirty_bitmap, 0, allocated_size);

        d.slot = mem->slot;

        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
            DPRINTF("ioctl failed %d\n", errno);
            ret = -1;
            break;
        }

        kvm_get_dirty_pages_log_range(mem->start_addr, d.dirty_bitmap,
                                      mem->start_addr, mem->memory_size);
        start_addr = mem->start_addr + mem->memory_size;
    }
    qemu_free(d.dirty_bitmap);

    return ret;
}
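
/*
 * Sizing example for the bitmap above (hypothetical numbers): a 1 MiB slot
 * with 4 KiB target pages spans 256 pages and thus needs 256 bits; rounding
 * up to a multiple of HOST_LONG_BITS (say 64) leaves 256 bits, so
 * KVM_GET_DIRTY_LOG is handed size == 256 / 8 == 32 bytes of bitmap.
 */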
int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }

    return ret;
}
int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }

    return ret;
}
int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}
static int kvm_check_many_ioeventfds(void)
{
    /* Userspace can use ioeventfd for io notification. This requires a host
     * that supports eventfd(2) and an I/O thread; since eventfd does not
     * support SIGIO it cannot interrupt the vcpu.
     *
     * Older kernels have a 6 device limit on the KVM io bus. Find out so we
     * can avoid creating too many ioeventfds.
     */
#if defined(CONFIG_EVENTFD) && defined(CONFIG_IOTHREAD)
    int ioeventfds[7];
    int i, ret = 0;
    for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
        ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
        if (ioeventfds[i] < 0) {
            break;
        }
        ret = kvm_set_ioeventfd_pio_word(ioeventfds[i], 0, i, true);
        if (ret < 0) {
            close(ioeventfds[i]);
            break;
        }
    }

    /* Decide whether many devices are supported or not */
    ret = i == ARRAY_SIZE(ioeventfds);

    while (i-- > 0) {
        kvm_set_ioeventfd_pio_word(ioeventfds[i], 0, i, false);
        close(ioeventfds[i]);
    }
    return ret;
#else
    return 0;
#endif
}
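
/*
 * Probing example: the loop above tries to bind one more ioeventfd than the
 * old 6-device io bus limit allows. On such a kernel the last
 * kvm_set_ioeventfd_pio_word() call fails, i stops short of
 * ARRAY_SIZE(ioeventfds), and the function reports 0; newer kernels pass
 * all assignments and the function reports 1.
 */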
static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{
    while (list->name) {
        if (!kvm_check_extension(s, list->value)) {
            return list;
        }
        list++;
    }
    return NULL;
}
static void kvm_set_phys_mem(target_phys_addr_t start_addr, ram_addr_t size,
                             ram_addr_t phys_offset, bool log_dirty)
{
    KVMState *s = kvm_state;
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    KVMSlot *mem, old;
    int err;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and unaligned start address. */
    size = TARGET_PAGE_ALIGN(size);
    start_addr = TARGET_PAGE_ALIGN(start_addr);

    /* KVM does not support read-only slots */
    phys_offset &= ~IO_MEM_ROM;

    while (1) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
        if (!mem) {
            break;
        }

        if (flags < IO_MEM_UNASSIGNED && start_addr >= mem->start_addr &&
            (start_addr + size <= mem->start_addr + mem->memory_size) &&
            (phys_offset - start_addr == mem->phys_offset - mem->start_addr)) {
            /* The new slot fits into the existing one and comes with
             * identical parameters - update flags and done. */
            kvm_slot_dirty_pages_log_change(mem, log_dirty);
            return;
        }

        old = *mem;

        /* unregister the overlapping slot */
        mem->memory_size = 0;
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
                    __func__, strerror(-err));
            abort();
        }

        /* Workaround for older KVM versions: we can't join slots, even not by
         * unregistering the previous ones and then registering the larger
         * slot. We have to maintain the existing fragmentation. Sigh.
         *
         * This workaround assumes that the new slot starts at the same
         * address as the first existing one. If not or if some overlapping
         * slot comes around later, we will fail (not seen in practice so far)
         * - and actually require a recent KVM version. */
        if (s->broken_set_mem_region &&
            old.start_addr == start_addr && old.memory_size < size &&
            flags < IO_MEM_UNASSIGNED) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = old.memory_size;
            mem->start_addr = old.start_addr;
            mem->phys_offset = old.phys_offset;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error updating slot: %s\n", __func__,
                        strerror(-err));
                abort();
            }

            start_addr += old.memory_size;
            phys_offset += old.memory_size;
            size -= old.memory_size;
            continue;
        }

        /* register prefix slot */
        if (old.start_addr < start_addr) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = start_addr - old.start_addr;
            mem->start_addr = old.start_addr;
            mem->phys_offset = old.phys_offset;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering prefix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }

        /* register suffix slot */
        if (old.start_addr + old.memory_size > start_addr + size) {
            ram_addr_t size_delta;

            mem = kvm_alloc_slot(s);
            mem->start_addr = start_addr + size;
            size_delta = mem->start_addr - old.start_addr;
            mem->memory_size = old.memory_size - size_delta;
            mem->phys_offset = old.phys_offset + size_delta;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering suffix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }
    }

    /* in case the KVM bug workaround already "consumed" the new slot */
    if (!size) {
        return;
    }
    /* KVM does not need to know about this memory */
    if (flags >= IO_MEM_UNASSIGNED) {
        return;
    }
    mem = kvm_alloc_slot(s);
    mem->memory_size = size;
    mem->start_addr = start_addr;
    mem->phys_offset = phys_offset;
    mem->flags = kvm_mem_flags(s, log_dirty);

    err = kvm_set_user_memory_region(s, mem);
    if (err) {
        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                strerror(-err));
        abort();
    }
}
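
/*
 * Worked example of the slot splitting above (hypothetical addresses): if
 * an existing slot covers [0x0, 0x10000) and a new region is registered
 * over [0x4000, 0x8000), the old slot is unregistered, a prefix slot
 * [0x0, 0x4000) and a suffix slot [0x8000, 0x10000) are re-registered with
 * the old phys_offset (shifted by size_delta for the suffix), and the loop
 * then finds no further overlap and registers the new [0x4000, 0x8000)
 * slot itself.
 */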
static void kvm_client_set_memory(struct CPUPhysMemoryClient *client,
                                  target_phys_addr_t start_addr,
                                  ram_addr_t size, ram_addr_t phys_offset,
                                  bool log_dirty)
{
    kvm_set_phys_mem(start_addr, size, phys_offset, log_dirty);
}
static int kvm_client_sync_dirty_bitmap(struct CPUPhysMemoryClient *client,
                                        target_phys_addr_t start_addr,
                                        target_phys_addr_t end_addr)
{
    return kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
}

static int kvm_client_migration_log(struct CPUPhysMemoryClient *client,
                                    int enable)
{
    return kvm_set_migration_log(enable);
}
static CPUPhysMemoryClient kvm_cpu_phys_memory_client = {
    .set_memory = kvm_client_set_memory,
    .sync_dirty_bitmap = kvm_client_sync_dirty_bitmap,
    .migration_log = kvm_client_migration_log,
    .log_start = kvm_log_start,
    .log_stop = kvm_log_stop,
};
int kvm_init(void)
{
    static const char upgrade_note[] =
        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
        "(see http://sourceforge.net/projects/kvm).\n";
    KVMState *s;
    const KVMCapabilityInfo *missing_cap;
    int ret;
    int i;

    s = qemu_mallocz(sizeof(KVMState));

#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        s->slots[i].slot = i;
    }
    s->vmfd = -1;
    s->fd = qemu_open("/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        fprintf(stderr, "kvm version too old\n");
        ret = -EINVAL;
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        fprintf(stderr, "kvm version not supported\n");
        ret = -EINVAL;
        goto err;
    }

    s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
    if (s->vmfd < 0) {
#ifdef TARGET_S390X
        fprintf(stderr, "Please add the 'switch_amode' kernel parameter to "
                        "your host kernel command line\n");
#endif
        ret = s->vmfd;
        goto err;
    }

    missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
    if (!missing_cap) {
        missing_cap =
            kvm_check_extension_list(s, kvm_arch_required_capabilities);
    }
    if (missing_cap) {
        ret = -EINVAL;
        fprintf(stderr, "kvm does not support %s\n%s",
                missing_cap->name, upgrade_note);
        goto err;
    }

    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);

    s->broken_set_mem_region = 1;
#ifdef KVM_CAP_JOIN_MEMORY_REGIONS_WORKS
    ret = kvm_check_extension(s, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
    if (ret > 0) {
        s->broken_set_mem_region = 0;
    }
#endif

#ifdef KVM_CAP_VCPU_EVENTS
    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif

    s->robust_singlestep = 0;
#ifdef KVM_CAP_X86_ROBUST_SINGLESTEP
    s->robust_singlestep =
        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);
#endif

#ifdef KVM_CAP_DEBUGREGS
    s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
#endif

#ifdef KVM_CAP_XSAVE
    s->xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
#endif

#ifdef KVM_CAP_XCRS
    s->xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
#endif

    ret = kvm_arch_init(s);
    if (ret < 0) {
        goto err;
    }

    kvm_state = s;
    cpu_register_phys_memory_client(&kvm_cpu_phys_memory_client);

    s->many_ioeventfds = kvm_check_many_ioeventfds();

    return 0;

err:
    if (s->vmfd != -1) {
        close(s->vmfd);
    }
    if (s->fd != -1) {
        close(s->fd);
    }
    qemu_free(s);

    return ret;
}
static void kvm_handle_io(uint16_t port, void *data, int direction, int size,
                          uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        if (direction == KVM_EXIT_IO_IN) {
            switch (size) {
            case 1:
                stb_p(ptr, cpu_inb(port));
                break;
            case 2:
                stw_p(ptr, cpu_inw(port));
                break;
            case 4:
                stl_p(ptr, cpu_inl(port));
                break;
            }
        } else {
            switch (size) {
            case 1:
                cpu_outb(port, ldub_p(ptr));
                break;
            case 2:
                cpu_outw(port, lduw_p(ptr));
                break;
            case 4:
                cpu_outl(port, ldl_p(ptr));
                break;
            }
        }

        ptr += size;
    }
}
830 static int kvm_handle_internal_error(CPUState
*env
, struct kvm_run
*run
)
832 fprintf(stderr
, "KVM internal error.");
833 if (kvm_check_extension(kvm_state
, KVM_CAP_INTERNAL_ERROR_DATA
)) {
836 fprintf(stderr
, " Suberror: %d\n", run
->internal
.suberror
);
837 for (i
= 0; i
< run
->internal
.ndata
; ++i
) {
838 fprintf(stderr
, "extra data[%d]: %"PRIx64
"\n",
839 i
, (uint64_t)run
->internal
.data
[i
]);
842 fprintf(stderr
, "\n");
844 if (run
->internal
.suberror
== KVM_INTERNAL_ERROR_EMULATION
) {
845 fprintf(stderr
, "emulation failure\n");
846 if (!kvm_arch_stop_on_emulation_error(env
)) {
847 cpu_dump_state(env
, stderr
, fprintf
, CPU_DUMP_CODE
);
848 return EXCP_INTERRUPT
;
851 /* FIXME: Should trigger a qmp message to let management know
852 * something went wrong.
void kvm_flush_coalesced_mmio_buffer(void)
{
    KVMState *s = kvm_state;
    if (s->coalesced_mmio_ring) {
        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            smp_wmb();
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }
}
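
/*
 * The ring is a single-producer (kernel) / single-consumer circular buffer.
 * For example, with a ring size of 170 entries (the actual
 * KVM_COALESCED_MMIO_MAX is kernel-dependent), first == 168 and last == 2
 * mean entries 168, 169, 0 and 1 are pending; consuming advances first
 * modulo the ring size until it catches up with last.
 */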
static void do_kvm_cpu_synchronize_state(void *_env)
{
    CPUState *env = _env;

    if (!env->kvm_vcpu_dirty) {
        kvm_arch_get_registers(env);
        env->kvm_vcpu_dirty = 1;
    }
}
void kvm_cpu_synchronize_state(CPUState *env)
{
    if (!env->kvm_vcpu_dirty) {
        run_on_cpu(env, do_kvm_cpu_synchronize_state, env);
    }
}
void kvm_cpu_synchronize_post_reset(CPUState *env)
{
    kvm_arch_put_registers(env, KVM_PUT_RESET_STATE);
    env->kvm_vcpu_dirty = 0;
}

void kvm_cpu_synchronize_post_init(CPUState *env)
{
    kvm_arch_put_registers(env, KVM_PUT_FULL_STATE);
    env->kvm_vcpu_dirty = 0;
}
int kvm_cpu_exec(CPUState *env)
{
    struct kvm_run *run = env->kvm_run;
    int ret, run_ret;

    DPRINTF("kvm_cpu_exec()\n");

    if (kvm_arch_process_async_events(env)) {
        env->exit_request = 0;
        return EXCP_HLT;
    }

    cpu_single_env = env;

    do {
        if (env->kvm_vcpu_dirty) {
            kvm_arch_put_registers(env, KVM_PUT_RUNTIME_STATE);
            env->kvm_vcpu_dirty = 0;
        }

        kvm_arch_pre_run(env, run);
        if (env->exit_request) {
            DPRINTF("interrupt exit requested\n");
            /*
             * KVM requires us to reenter the kernel after IO exits to complete
             * instruction emulation. This self-signal will ensure that we
             * leave ASAP again.
             */
            qemu_cpu_kick_self();
        }
        cpu_single_env = NULL;
        qemu_mutex_unlock_iothread();

        run_ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);

        qemu_mutex_lock_iothread();
        cpu_single_env = env;
        kvm_arch_post_run(env, run);

        kvm_flush_coalesced_mmio_buffer();

        if (run_ret < 0) {
            if (run_ret == -EINTR || run_ret == -EAGAIN) {
                DPRINTF("io window exit\n");
                ret = EXCP_INTERRUPT;
                break;
            }
            DPRINTF("kvm run failed %s\n", strerror(-run_ret));
            abort();
        }

        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            DPRINTF("handle_io\n");
            kvm_handle_io(run->io.port,
                          (uint8_t *)run + run->io.data_offset,
                          run->io.direction,
                          run->io.size,
                          run->io.count);
            ret = 0;
            break;
        case KVM_EXIT_MMIO:
            DPRINTF("handle_mmio\n");
            cpu_physical_memory_rw(run->mmio.phys_addr,
                                   run->mmio.data,
                                   run->mmio.len,
                                   run->mmio.is_write);
            ret = 0;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            DPRINTF("irq_window_open\n");
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_SHUTDOWN:
            DPRINTF("shutdown\n");
            qemu_system_reset_request();
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_UNKNOWN:
            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
                    (uint64_t)run->hw.hardware_exit_reason);
            ret = -1;
            break;
#ifdef KVM_CAP_INTERNAL_ERROR_DATA
        case KVM_EXIT_INTERNAL_ERROR:
            ret = kvm_handle_internal_error(env, run);
            break;
#endif
        default:
            DPRINTF("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(env, run);
            break;
        }
    } while (ret == 0);

    if (ret < 0) {
        cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
        vm_stop(VMSTOP_PANIC);
    }

    env->exit_request = 0;
    cpu_single_env = NULL;
    return ret;
}
int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}
int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->vmfd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}
int kvm_vcpu_ioctl(CPUState *env, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(env->kvm_fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}
int kvm_has_sync_mmu(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
}

int kvm_has_vcpu_events(void)
{
    return kvm_state->vcpu_events;
}

int kvm_has_robust_singlestep(void)
{
    return kvm_state->robust_singlestep;
}

int kvm_has_debugregs(void)
{
    return kvm_state->debugregs;
}

int kvm_has_xsave(void)
{
    return kvm_state->xsave;
}

int kvm_has_xcrs(void)
{
    return kvm_state->xcrs;
}
int kvm_has_many_ioeventfds(void)
{
    if (!kvm_enabled()) {
        return 0;
    }
    return kvm_state->many_ioeventfds;
}
void kvm_setup_guest_memory(void *start, size_t size)
{
    if (!kvm_has_sync_mmu()) {
        int ret = qemu_madvise(start, size, QEMU_MADV_DONTFORK);

        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr,
                    "Need MADV_DONTFORK in absence of synchronous KVM MMU\n");
            exit(1);
        }
    }
}
#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
                                                 target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &env->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc) {
            return bp;
        }
    }
    return NULL;
}

int kvm_sw_breakpoints_active(CPUState *env)
{
    return !QTAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints);
}
struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    CPUState *env;
    int err;
};
static void kvm_invoke_set_guest_debug(void *data)
{
    struct kvm_set_guest_debug_data *dbg_data = data;
    CPUState *env = dbg_data->env;

    dbg_data->err = kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg_data->dbg);
}
int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    struct kvm_set_guest_debug_data data;

    data.dbg.control = reinject_trap;

    if (env->singlestep_enabled) {
        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
    }
    kvm_arch_update_guest_debug(env, &data.dbg);
    data.env = env;

    run_on_cpu(env, kvm_invoke_set_guest_debug, &data);
    return data.err;
}
int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = qemu_malloc(sizeof(struct kvm_sw_breakpoint));
        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(current_env, bp);
        if (err) {
            qemu_free(bp);
            return err;
        }

        QTAILQ_INSERT_HEAD(&current_env->kvm_state->kvm_sw_breakpoints,
                           bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}
*current_env
, target_ulong addr
,
1207 target_ulong len
, int type
)
1209 struct kvm_sw_breakpoint
*bp
;
1213 if (type
== GDB_BREAKPOINT_SW
) {
1214 bp
= kvm_find_sw_breakpoint(current_env
, addr
);
1219 if (bp
->use_count
> 1) {
1224 err
= kvm_arch_remove_sw_breakpoint(current_env
, bp
);
1229 QTAILQ_REMOVE(¤t_env
->kvm_state
->kvm_sw_breakpoints
, bp
, entry
);
1232 err
= kvm_arch_remove_hw_breakpoint(addr
, len
, type
);
1238 for (env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1239 err
= kvm_update_guest_debug(env
, 0);
1247 void kvm_remove_all_breakpoints(CPUState
*current_env
)
1249 struct kvm_sw_breakpoint
*bp
, *next
;
1250 KVMState
*s
= current_env
->kvm_state
;
1253 QTAILQ_FOREACH_SAFE(bp
, &s
->kvm_sw_breakpoints
, entry
, next
) {
1254 if (kvm_arch_remove_sw_breakpoint(current_env
, bp
) != 0) {
1255 /* Try harder to find a CPU that currently sees the breakpoint. */
1256 for (env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1257 if (kvm_arch_remove_sw_breakpoint(env
, bp
) == 0) {
1263 kvm_arch_remove_all_hw_breakpoints();
1265 for (env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1266 kvm_update_guest_debug(env
, 0);
#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    return -EINVAL;
}

int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

void kvm_remove_all_breakpoints(CPUState *current_env)
{
}
#endif /* !KVM_CAP_SET_GUEST_DEBUG */
int kvm_set_signal_mask(CPUState *env, const sigset_t *sigset)
{
    struct kvm_signal_mask *sigmask;
    int r;

    if (!sigset) {
        return kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, NULL);
    }

    sigmask = qemu_malloc(sizeof(*sigmask) + sizeof(*sigset));

    sigmask->len = 8;
    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
    r = kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, sigmask);
    qemu_free(sigmask);

    return r;
}
int kvm_set_ioeventfd_mmio_long(int fd, uint32_t addr, uint32_t val, bool assign)
{
#ifdef KVM_IOEVENTFD
    int ret;
    struct kvm_ioeventfd iofd;

    iofd.datamatch = val;
    iofd.addr = addr;
    iofd.len = 4;
    iofd.flags = KVM_IOEVENTFD_FLAG_DATAMATCH;
    iofd.fd = fd;

    if (!kvm_enabled()) {
        return -ENOSYS;
    }

    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);
    if (ret < 0) {
        return -errno;
    }

    return 0;
#else
    return -ENOSYS;
#endif
}
, uint16_t addr
, uint16_t val
, bool assign
)
1347 #ifdef KVM_IOEVENTFD
1348 struct kvm_ioeventfd kick
= {
1352 .flags
= KVM_IOEVENTFD_FLAG_DATAMATCH
| KVM_IOEVENTFD_FLAG_PIO
,
1356 if (!kvm_enabled()) {
1360 kick
.flags
|= KVM_IOEVENTFD_FLAG_DEASSIGN
;
1362 r
= kvm_vm_ioctl(kvm_state
, KVM_IOEVENTFD
, &kick
);
1372 int kvm_on_sigbus_vcpu(CPUState
*env
, int code
, void *addr
)
1374 return kvm_arch_on_sigbus_vcpu(env
, code
, addr
);
int kvm_on_sigbus(int code, void *addr)
{
    return kvm_arch_on_sigbus(code, addr);
}