/*
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include <sys/types.h>
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu-barrier.h"

/* This check must be after config-host.h is included */
#include <sys/eventfd.h>

/* KVM uses PAGE_SIZE in its definition of COALESCED_MMIO_MAX */
#define PAGE_SIZE TARGET_PAGE_SIZE

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

typedef struct KVMSlot
{
    target_phys_addr_t start_addr;
    ram_addr_t memory_size;
    ram_addr_t phys_offset;
    int slot;
    int flags;
} KVMSlot;

typedef struct kvm_dirty_log KVMDirtyLog;

struct KVMState
{
    KVMSlot slots[32];
    int fd;
    int vmfd;
    int coalesced_mmio;
#ifdef KVM_CAP_COALESCED_MMIO
    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
#endif
    int broken_set_mem_region;
    int migration_log;
    int vcpu_events;
    int robust_singlestep;
    int debugregs;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
#endif
    int irqchip_in_kernel;
    int pit_in_kernel;
    int xsave, xcrs;
    int many_ioeventfds;
};

static KVMState *kvm_state;
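
/* Slot bookkeeping: the kernel identifies memory regions by slot number, so
 * we mirror its slot array here.  A slot with memory_size == 0 is free. */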
static KVMSlot *kvm_alloc_slot(KVMState *s)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        /* KVM private memory slots */
        if (s->slots[i].memory_size == 0)
            return &s->slots[i];
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
                                         target_phys_addr_t start_addr,
                                         target_phys_addr_t end_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (start_addr == mem->start_addr &&
            end_addr == mem->start_addr + mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}

/*
 * Find overlapping slot with lowest start address
 */
static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
                                            target_phys_addr_t start_addr,
                                            target_phys_addr_t end_addr)
{
    KVMSlot *found = NULL;
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (mem->memory_size == 0 ||
            (found && found->start_addr < mem->start_addr)) {
            continue;
        }

        if (end_addr > mem->start_addr &&
            start_addr < mem->start_addr + mem->memory_size) {
            found = mem;
        }
    }

    return found;
}

int kvm_physical_memory_addr_from_ram(KVMState *s, ram_addr_t ram_addr,
                                      target_phys_addr_t *phys_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (ram_addr >= mem->phys_offset &&
            ram_addr < mem->phys_offset + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram_addr - mem->phys_offset);
            return 1;
        }
    }

    return 0;
}
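
/* Push one slot's parameters to the kernel.  While migration logging is
 * active, dirty tracking is forced on regardless of the slot's own flags. */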
static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
{
    struct kvm_userspace_memory_region mem;

    mem.slot = slot->slot;
    mem.guest_phys_addr = slot->start_addr;
    mem.memory_size = slot->memory_size;
    mem.userspace_addr = (unsigned long)qemu_safe_ram_ptr(slot->phys_offset);
    mem.flags = slot->flags;
    if (s->migration_log) {
        mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}

static void kvm_reset_vcpu(void *opaque)
{
    CPUState *env = opaque;

    kvm_arch_reset_vcpu(env);
}

int kvm_irqchip_in_kernel(void)
{
    return kvm_state->irqchip_in_kernel;
}

int kvm_pit_in_kernel(void)
{
    return kvm_state->pit_in_kernel;
}
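
/* Create the vcpu in the kernel and mmap the shared kvm_run area that
 * carries exit information between KVM_RUN calls. */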
int kvm_init_vcpu(CPUState *env)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    DPRINTF("kvm_init_vcpu\n");

    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, env->cpu_index);
    if (ret < 0) {
        DPRINTF("kvm_create_vcpu failed\n");
        goto err;
    }

    env->kvm_fd = ret;
    env->kvm_state = s;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    env->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        env->kvm_fd, 0);
    if (env->kvm_run == MAP_FAILED) {
        ret = -errno;
        DPRINTF("mmap'ing vcpu state failed\n");
        goto err;
    }

#ifdef KVM_CAP_COALESCED_MMIO
    if (s->coalesced_mmio && !s->coalesced_mmio_ring)
        s->coalesced_mmio_ring = (void *) env->kvm_run +
                s->coalesced_mmio * PAGE_SIZE;
#endif

    ret = kvm_arch_init_vcpu(env);
    if (ret == 0) {
        qemu_register_reset(kvm_reset_vcpu, env);
        kvm_arch_reset_vcpu(env);
    }
err:
    return ret;
}

/*
 * dirty pages logging control
 */
static int kvm_dirty_pages_log_change(target_phys_addr_t phys_addr,
                                      ram_addr_t size, int flags, int mask)
{
    KVMState *s = kvm_state;
    KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);
    int old_flags;

    if (mem == NULL) {
        fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
                TARGET_FMT_plx "\n", __func__, phys_addr,
                (target_phys_addr_t)(phys_addr + size - 1));
        return -EINVAL;
    }

    old_flags = mem->flags;

    flags = (mem->flags & ~mask) | flags;
    mem->flags = flags;

    /* If nothing changed effectively, no need to issue ioctl */
    if (s->migration_log) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    if (flags == old_flags) {
        return 0;
    }

    return kvm_set_user_memory_region(s, mem);
}
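
/* kvm_log_start/stop flip KVM_MEM_LOG_DIRTY_PAGES on the slot covering the
 * range; the range must match an existing slot exactly, or the change is
 * reported as a bug. */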
int kvm_log_start(target_phys_addr_t phys_addr, ram_addr_t size)
{
    return kvm_dirty_pages_log_change(phys_addr, size,
                                      KVM_MEM_LOG_DIRTY_PAGES,
                                      KVM_MEM_LOG_DIRTY_PAGES);
}

int kvm_log_stop(target_phys_addr_t phys_addr, ram_addr_t size)
{
    return kvm_dirty_pages_log_change(phys_addr, size,
                                      0,
                                      KVM_MEM_LOG_DIRTY_PAGES);
}

static int kvm_set_migration_log(int enable)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    int i, err;

    s->migration_log = enable;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        mem = &s->slots[i];

        if (!mem->memory_size) {
            continue;
        }
        if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
            continue;
        }
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            return err;
        }
    }
    return 0;
}

/* get kvm's dirty pages bitmap and update qemu's */
static int kvm_get_dirty_pages_log_range(unsigned long start_addr,
                                         unsigned long *bitmap,
                                         unsigned long offset,
                                         unsigned long mem_size)
{
    unsigned int i, j;
    unsigned long page_number, addr, addr1, c;
    ram_addr_t ram_addr;
    unsigned int len = ((mem_size / TARGET_PAGE_SIZE) + HOST_LONG_BITS - 1) /
        HOST_LONG_BITS;

    /*
     * bitmap-traveling is faster than memory-traveling (for addr...)
     * especially when most of the memory is not dirty.
     */
    for (i = 0; i < len; i++) {
        if (bitmap[i] != 0) {
            c = leul_to_cpu(bitmap[i]);
            do {
                j = ffsl(c) - 1;
                c &= ~(1ul << j);
                page_number = i * HOST_LONG_BITS + j;
                addr1 = page_number * TARGET_PAGE_SIZE;
                addr = offset + addr1;
                ram_addr = cpu_get_physical_page_desc(addr);
                cpu_physical_memory_set_dirty(ram_addr);
            } while (c != 0);
        }
    }
    return 0;
}
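
/* Round x up to the next multiple of y, where y is a power of two;
 * e.g. ALIGN(5, 4) == 8.  Used below to size the dirty bitmap in whole
 * host longs. */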
#define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))

/**
 * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
 * This function updates qemu's dirty bitmap using cpu_physical_memory_set_dirty().
 * This means all bits are set to dirty.
 *
 * @start_addr: start of logged region.
 * @end_addr: end of logged region.
 */
static int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                          target_phys_addr_t end_addr)
{
    KVMState *s = kvm_state;
    unsigned long size, allocated_size = 0;
    KVMDirtyLog d;
    KVMSlot *mem;
    int ret = 0;

    d.dirty_bitmap = NULL;
    while (start_addr < end_addr) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
        if (mem == NULL) {
            break;
        }

        size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS), HOST_LONG_BITS) / 8;
        if (!d.dirty_bitmap) {
            d.dirty_bitmap = qemu_malloc(size);
        } else if (size > allocated_size) {
            d.dirty_bitmap = qemu_realloc(d.dirty_bitmap, size);
        }
        allocated_size = size;
        memset(d.dirty_bitmap, 0, allocated_size);

        d.slot = mem->slot;

        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
            DPRINTF("ioctl failed %d\n", errno);
            ret = -1;
            break;
        }

        kvm_get_dirty_pages_log_range(mem->start_addr, d.dirty_bitmap,
                                      mem->start_addr, mem->memory_size);
        start_addr = mem->start_addr + mem->memory_size;
    }
    qemu_free(d.dirty_bitmap);

    return ret;
}
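
/* Ask the kernel to batch writes to this MMIO zone into the coalesced MMIO
 * ring instead of taking a userspace exit for every access. */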
int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
#ifdef KVM_CAP_COALESCED_MMIO
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
#endif

    return ret;
}

int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
#ifdef KVM_CAP_COALESCED_MMIO
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
#endif

    return ret;
}

int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}

static int kvm_check_many_ioeventfds(void)
{
    /* Userspace can use ioeventfd for io notification.  This requires a host
     * that supports eventfd(2) and an I/O thread; since eventfd does not
     * support SIGIO it cannot interrupt the vcpu.
     *
     * Older kernels have a 6 device limit on the KVM io bus.  Find out so we
     * can avoid creating too many ioeventfds.
     */
#if defined(CONFIG_EVENTFD) && defined(CONFIG_IOTHREAD)
    int ioeventfds[7];
    int i, ret = 0;
    for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
        ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
        if (ioeventfds[i] < 0) {
            break;
        }
        ret = kvm_set_ioeventfd_pio_word(ioeventfds[i], 0, i, true);
        if (ret < 0) {
            close(ioeventfds[i]);
            break;
        }
    }

    /* Decide whether many devices are supported or not */
    ret = i == ARRAY_SIZE(ioeventfds);

    while (i-- > 0) {
        kvm_set_ioeventfd_pio_word(ioeventfds[i], 0, i, false);
        close(ioeventfds[i]);
    }
    return ret;
#else
    return 0;
#endif
}
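
/* Register, replace or remove the slot covering [start_addr, start_addr +
 * size) with the kernel.  Overlapping slots are unregistered first; any
 * prefix or suffix remainder is re-registered separately, since KVM cannot
 * grow or shrink a slot in place. */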
static void kvm_set_phys_mem(target_phys_addr_t start_addr,
                             ram_addr_t size,
                             ram_addr_t phys_offset)
{
    KVMState *s = kvm_state;
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    KVMSlot *mem, old;
    int err;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and unaligned start address. */
    size = TARGET_PAGE_ALIGN(size);
    start_addr = TARGET_PAGE_ALIGN(start_addr);

    /* KVM does not support read-only slots */
    phys_offset &= ~IO_MEM_ROM;

    while (1) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
        if (!mem) {
            break;
        }

        if (flags < IO_MEM_UNASSIGNED && start_addr >= mem->start_addr &&
            (start_addr + size <= mem->start_addr + mem->memory_size) &&
            (phys_offset - start_addr == mem->phys_offset - mem->start_addr)) {
            /* The new slot fits into the existing one and comes with
             * identical parameters - nothing to be done. */
            return;
        }

        old = *mem;

        /* unregister the overlapping slot */
        mem->memory_size = 0;
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
                    __func__, strerror(-err));
            abort();
        }

        /* Workaround for older KVM versions: we can't join slots, even not by
         * unregistering the previous ones and then registering the larger
         * slot. We have to maintain the existing fragmentation. Sigh.
         *
         * This workaround assumes that the new slot starts at the same
         * address as the first existing one. If not or if some overlapping
         * slot comes around later, we will fail (not seen in practice so far)
         * - and actually require a recent KVM version. */
        if (s->broken_set_mem_region &&
            old.start_addr == start_addr && old.memory_size < size &&
            flags < IO_MEM_UNASSIGNED) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = old.memory_size;
            mem->start_addr = old.start_addr;
            mem->phys_offset = old.phys_offset;
            mem->flags = 0;

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error updating slot: %s\n", __func__,
                        strerror(-err));
                abort();
            }

            start_addr += old.memory_size;
            phys_offset += old.memory_size;
            size -= old.memory_size;
            continue;
        }

        /* register prefix slot */
        if (old.start_addr < start_addr) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = start_addr - old.start_addr;
            mem->start_addr = old.start_addr;
            mem->phys_offset = old.phys_offset;
            mem->flags = 0;

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering prefix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }

        /* register suffix slot */
        if (old.start_addr + old.memory_size > start_addr + size) {
            ram_addr_t size_delta;

            mem = kvm_alloc_slot(s);
            mem->start_addr = start_addr + size;
            size_delta = mem->start_addr - old.start_addr;
            mem->memory_size = old.memory_size - size_delta;
            mem->phys_offset = old.phys_offset + size_delta;
            mem->flags = 0;

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering suffix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }
    }

    /* in case the KVM bug workaround already "consumed" the new slot */
    if (!size) {
        return;
    }

    /* KVM does not need to know about this memory */
    if (flags >= IO_MEM_UNASSIGNED)
        return;

    mem = kvm_alloc_slot(s);
    mem->memory_size = size;
    mem->start_addr = start_addr;
    mem->phys_offset = phys_offset;
    mem->flags = 0;

    err = kvm_set_user_memory_region(s, mem);
    if (err) {
        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                strerror(-err));
        abort();
    }
}

static void kvm_client_set_memory(struct CPUPhysMemoryClient *client,
                                  target_phys_addr_t start_addr,
                                  ram_addr_t size, ram_addr_t phys_offset)
{
    kvm_set_phys_mem(start_addr, size, phys_offset);
}

static int kvm_client_sync_dirty_bitmap(struct CPUPhysMemoryClient *client,
                                        target_phys_addr_t start_addr,
                                        target_phys_addr_t end_addr)
{
    return kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
}

static int kvm_client_migration_log(struct CPUPhysMemoryClient *client,
                                    int enable)
{
    return kvm_set_migration_log(enable);
}

static CPUPhysMemoryClient kvm_cpu_phys_memory_client = {
    .set_memory = kvm_client_set_memory,
    .sync_dirty_bitmap = kvm_client_sync_dirty_bitmap,
    .migration_log = kvm_client_migration_log,
};
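
/* Open /dev/kvm, create the VM and probe the capabilities this code
 * relies on. */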
int kvm_init(int smp_cpus)
{
    static const char upgrade_note[] =
        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
        "(see http://sourceforge.net/projects/kvm).\n";
    KVMState *s;
    int ret;
    int i;

    s = qemu_mallocz(sizeof(KVMState));

#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
    for (i = 0; i < ARRAY_SIZE(s->slots); i++)
        s->slots[i].slot = i;

    s->vmfd = -1;
    s->fd = qemu_open("/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret > 0)
            ret = -EINVAL;
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }

    s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
    if (s->vmfd < 0) {
#ifdef TARGET_S390X
        fprintf(stderr, "Please add the 'switch_amode' kernel parameter to "
                        "your host kernel command line\n");
#endif
        goto err;
    }

    /* initially, KVM allocated its own memory and we had to jump through
     * hooks to make phys_ram_base point to this. Modern versions of KVM
     * just use a user allocated buffer so we can use regular pages
     * unmodified. Make sure we have a sufficiently modern version of KVM.
     */
    if (!kvm_check_extension(s, KVM_CAP_USER_MEMORY)) {
        ret = -EINVAL;
        fprintf(stderr, "kvm does not support KVM_CAP_USER_MEMORY\n%s",
                upgrade_note);
        goto err;
    }

    /* There was a nasty bug in < kvm-80 that prevents memory slots from being
     * destroyed properly. Since we rely on this capability, refuse to work
     * with any kernel without this capability. */
    if (!kvm_check_extension(s, KVM_CAP_DESTROY_MEMORY_REGION_WORKS)) {
        ret = -EINVAL;
        fprintf(stderr,
                "KVM kernel module broken (DESTROY_MEMORY_REGION).\n%s",
                upgrade_note);
        goto err;
    }

    s->coalesced_mmio = 0;
#ifdef KVM_CAP_COALESCED_MMIO
    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
    s->coalesced_mmio_ring = NULL;
#endif

    s->broken_set_mem_region = 1;
#ifdef KVM_CAP_JOIN_MEMORY_REGIONS_WORKS
    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
    if (ret > 0) {
        s->broken_set_mem_region = 0;
    }
#endif

    s->vcpu_events = 0;
#ifdef KVM_CAP_VCPU_EVENTS
    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif

    s->robust_singlestep = 0;
#ifdef KVM_CAP_X86_ROBUST_SINGLESTEP
    s->robust_singlestep =
        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);
#endif

    s->debugregs = 0;
#ifdef KVM_CAP_DEBUGREGS
    s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
#endif

    s->xsave = 0;
#ifdef KVM_CAP_XSAVE
    s->xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
#endif

    s->xcrs = 0;
#ifdef KVM_CAP_XCRS
    s->xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
#endif

    ret = kvm_arch_init(s, smp_cpus);
    if (ret < 0)
        goto err;

    kvm_state = s;
    cpu_register_phys_memory_client(&kvm_cpu_phys_memory_client);

    s->many_ioeventfds = kvm_check_many_ioeventfds();

    return 0;

err:
    if (s) {
        if (s->vmfd != -1)
            close(s->vmfd);
        if (s->fd != -1)
            close(s->fd);
    }
    qemu_free(s);

    return ret;
}
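
/* Replay a batch of port I/O accesses recorded in the kvm_run area.  size
 * is the access width in bytes (1, 2 or 4); count is the repeat count,
 * e.g. from x86 string I/O, with the data pointer advancing by size each
 * iteration. */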
static int kvm_handle_io(uint16_t port, void *data, int direction, int size,
                         uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        if (direction == KVM_EXIT_IO_IN) {
            switch (size) {
            case 1:
                stb_p(ptr, cpu_inb(port));
                break;
            case 2:
                stw_p(ptr, cpu_inw(port));
                break;
            case 4:
                stl_p(ptr, cpu_inl(port));
                break;
            }
        } else {
            switch (size) {
            case 1:
                cpu_outb(port, ldub_p(ptr));
                break;
            case 2:
                cpu_outw(port, lduw_p(ptr));
                break;
            case 4:
                cpu_outl(port, ldl_p(ptr));
                break;
            }
        }

        ptr += size;
    }

    return 1;
}

#ifdef KVM_CAP_INTERNAL_ERROR_DATA
static void kvm_handle_internal_error(CPUState *env, struct kvm_run *run)
{
    if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
        int i;

        fprintf(stderr, "KVM internal error. Suberror: %d\n",
                run->internal.suberror);

        for (i = 0; i < run->internal.ndata; ++i) {
            fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
                    i, (uint64_t)run->internal.data[i]);
        }
    }
    cpu_dump_state(env, stderr, fprintf, 0);
    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
        fprintf(stderr, "emulation failure\n");
        if (!kvm_arch_stop_on_emulation_error(env))
            return;
    }
    /* FIXME: Should trigger a qmp message to let management know
     * something went wrong.
     */
    abort();
}
#endif
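
/* Drain the coalesced MMIO ring, replaying each buffered write into the
 * memory core before further exits are handled. */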
void kvm_flush_coalesced_mmio_buffer(void)
{
#ifdef KVM_CAP_COALESCED_MMIO
    KVMState *s = kvm_state;
    if (s->coalesced_mmio_ring) {
        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            smp_wmb();
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }
#endif
}

static void do_kvm_cpu_synchronize_state(void *_env)
{
    CPUState *env = _env;

    if (!env->kvm_vcpu_dirty) {
        kvm_arch_get_registers(env);
        env->kvm_vcpu_dirty = 1;
    }
}

void kvm_cpu_synchronize_state(CPUState *env)
{
    if (!env->kvm_vcpu_dirty)
        run_on_cpu(env, do_kvm_cpu_synchronize_state, env);
}

void kvm_cpu_synchronize_post_reset(CPUState *env)
{
    kvm_arch_put_registers(env, KVM_PUT_RESET_STATE);
    env->kvm_vcpu_dirty = 0;
}

void kvm_cpu_synchronize_post_init(CPUState *env)
{
    kvm_arch_put_registers(env, KVM_PUT_FULL_STATE);
    env->kvm_vcpu_dirty = 0;
}
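
/* Main vcpu loop: flush dirty registers, enter the guest via KVM_RUN, then
 * dispatch on run->exit_reason; a positive ret re-enters the guest. */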
int kvm_cpu_exec(CPUState *env)
{
    struct kvm_run *run = env->kvm_run;
    int ret;

    DPRINTF("kvm_cpu_exec()\n");

    do {
#ifndef CONFIG_IOTHREAD
        if (env->exit_request) {
            DPRINTF("interrupt exit requested\n");
            ret = 0;
            break;
        }
#endif

        if (kvm_arch_process_irqchip_events(env)) {
            ret = 0;
            break;
        }

        if (env->kvm_vcpu_dirty) {
            kvm_arch_put_registers(env, KVM_PUT_RUNTIME_STATE);
            env->kvm_vcpu_dirty = 0;
        }

        kvm_arch_pre_run(env, run);
        cpu_single_env = NULL;
        qemu_mutex_unlock_iothread();
        ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);
        qemu_mutex_lock_iothread();
        cpu_single_env = env;
        kvm_arch_post_run(env, run);

        if (ret == -EINTR || ret == -EAGAIN) {
            cpu_exit(env);
            DPRINTF("io window exit\n");
            ret = 0;
            break;
        }

        if (ret < 0) {
            DPRINTF("kvm run failed %s\n", strerror(-ret));
            abort();
        }

        kvm_flush_coalesced_mmio_buffer();

        ret = 0; /* exit loop */
        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            DPRINTF("handle_io\n");
            ret = kvm_handle_io(run->io.port,
                                (uint8_t *)run + run->io.data_offset,
                                run->io.direction,
                                run->io.size,
                                run->io.count);
            break;
        case KVM_EXIT_MMIO:
            DPRINTF("handle_mmio\n");
            cpu_physical_memory_rw(run->mmio.phys_addr,
                                   run->mmio.data,
                                   run->mmio.len,
                                   run->mmio.is_write);
            ret = 1;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            DPRINTF("irq_window_open\n");
            break;
        case KVM_EXIT_SHUTDOWN:
            DPRINTF("shutdown\n");
            qemu_system_reset_request();
            ret = 1;
            break;
        case KVM_EXIT_UNKNOWN:
            DPRINTF("kvm_exit_unknown\n");
            break;
        case KVM_EXIT_FAIL_ENTRY:
            DPRINTF("kvm_exit_fail_entry\n");
            break;
        case KVM_EXIT_EXCEPTION:
            DPRINTF("kvm_exit_exception\n");
            break;
#ifdef KVM_CAP_INTERNAL_ERROR_DATA
        case KVM_EXIT_INTERNAL_ERROR:
            kvm_handle_internal_error(env, run);
            break;
#endif
        case KVM_EXIT_DEBUG:
            DPRINTF("kvm_exit_debug\n");
#ifdef KVM_CAP_SET_GUEST_DEBUG
            if (kvm_arch_debug(&run->debug.arch)) {
                env->exception_index = EXCP_DEBUG;
                return 0;
            }
            /* re-enter, this exception was guest-internal */
            ret = 1;
#endif /* KVM_CAP_SET_GUEST_DEBUG */
            break;
        default:
            DPRINTF("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(env, run);
            break;
        }
    } while (ret > 0);

    if (env->exit_request) {
        env->exit_request = 0;
        env->exception_index = EXCP_INTERRUPT;
    }

    return ret;
}
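
/* Thin vararg wrappers around ioctl(2) for the /dev/kvm fd, the VM fd and a
 * vcpu fd respectively.  Failures are normalized to -errno so callers can
 * print strerror(-ret). */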
int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->fd, type, arg);
    if (ret == -1)
        ret = -errno;

    return ret;
}

int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->vmfd, type, arg);
    if (ret == -1)
        ret = -errno;

    return ret;
}

int kvm_vcpu_ioctl(CPUState *env, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(env->kvm_fd, type, arg);
    if (ret == -1)
        ret = -errno;

    return ret;
}

int kvm_has_sync_mmu(void)
{
#ifdef KVM_CAP_SYNC_MMU
    KVMState *s = kvm_state;

    return kvm_check_extension(s, KVM_CAP_SYNC_MMU);
#else
    return 0;
#endif
}

int kvm_has_vcpu_events(void)
{
    return kvm_state->vcpu_events;
}

int kvm_has_robust_singlestep(void)
{
    return kvm_state->robust_singlestep;
}

int kvm_has_debugregs(void)
{
    return kvm_state->debugregs;
}

int kvm_has_xsave(void)
{
    return kvm_state->xsave;
}

int kvm_has_xcrs(void)
{
    return kvm_state->xcrs;
}

int kvm_has_many_ioeventfds(void)
{
    if (!kvm_enabled()) {
        return 0;
    }
    return kvm_state->many_ioeventfds;
}

void kvm_setup_guest_memory(void *start, size_t size)
{
    if (!kvm_has_sync_mmu()) {
        int ret = qemu_madvise(start, size, QEMU_MADV_DONTFORK);

        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr,
                    "Need MADV_DONTFORK in absence of synchronous KVM MMU\n");
            exit(1);
        }
    }
}
#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
                                                 target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &env->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc)
            return bp;
    }
    return NULL;
}

int kvm_sw_breakpoints_active(CPUState *env)
{
    return !QTAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints);
}

struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    CPUState *env;
    int err;
};

static void kvm_invoke_set_guest_debug(void *data)
{
    struct kvm_set_guest_debug_data *dbg_data = data;
    CPUState *env = dbg_data->env;

    dbg_data->err = kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg_data->dbg);
}

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    struct kvm_set_guest_debug_data data;

    data.dbg.control = reinject_trap;

    if (env->singlestep_enabled) {
        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
    }
    kvm_arch_update_guest_debug(env, &data.dbg);
    data.env = env;

    run_on_cpu(env, kvm_invoke_set_guest_debug, &data);
    return data.err;
}

int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = qemu_malloc(sizeof(struct kvm_sw_breakpoint));
        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(current_env, bp);
        if (err) {
            qemu_free(bp);
            return err;
        }

        QTAILQ_INSERT_HEAD(&current_env->kvm_state->kvm_sw_breakpoints,
                           bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err)
            return err;
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err)
            return err;
    }
    return 0;
}

int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (!bp)
            return -ENOENT;

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(current_env, bp);
        if (err)
            return err;

        QTAILQ_REMOVE(&current_env->kvm_state->kvm_sw_breakpoints, bp, entry);
        qemu_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err)
            return err;
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err)
            return err;
    }
    return 0;
}

void kvm_remove_all_breakpoints(CPUState *current_env)
{
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = current_env->kvm_state;
    CPUState *env;

    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            for (env = first_cpu; env != NULL; env = env->next_cpu) {
                if (kvm_arch_remove_sw_breakpoint(env, bp) == 0)
                    break;
            }
        }
    }
    kvm_arch_remove_all_hw_breakpoints();

    for (env = first_cpu; env != NULL; env = env->next_cpu)
        kvm_update_guest_debug(env, 0);
}

#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    return -EINVAL;
}

int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

void kvm_remove_all_breakpoints(CPUState *current_env)
{
}
#endif /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_set_signal_mask(CPUState *env, const sigset_t *sigset)
{
    struct kvm_signal_mask *sigmask;
    int r;

    if (!sigset)
        return kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, NULL);

    sigmask = qemu_malloc(sizeof(*sigmask) + sizeof(*sigset));

    sigmask->len = 8;
    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
    r = kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, sigmask);
    qemu_free(sigmask);

    return r;
}
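
/* ioeventfd support: bind an eventfd to a guest MMIO or PIO address so that
 * a write of the matching value is signalled in the kernel, with no exit to
 * userspace.  Both helpers return -ENOSYS when the host lacks support. */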
int kvm_set_ioeventfd_mmio_long(int fd, uint32_t addr, uint32_t val, bool assign)
{
#ifdef KVM_IOEVENTFD
    int ret;
    struct kvm_ioeventfd iofd;

    iofd.datamatch = val;
    iofd.addr = addr;
    iofd.len = 4;
    iofd.flags = KVM_IOEVENTFD_FLAG_DATAMATCH;
    iofd.fd = fd;

    if (!kvm_enabled()) {
        return -ENOSYS;
    }

    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);

    if (ret < 0) {
        return -errno;
    }

    return 0;
#else
    return -ENOSYS;
#endif
}

int kvm_set_ioeventfd_pio_word(int fd, uint16_t addr, uint16_t val, bool assign)
{
#ifdef KVM_IOEVENTFD
    struct kvm_ioeventfd kick = {
        .datamatch = val,
        .addr = addr,
        .len = 2,
        .flags = KVM_IOEVENTFD_FLAG_DATAMATCH | KVM_IOEVENTFD_FLAG_PIO,
        .fd = fd,
    };
    int r;

    if (!kvm_enabled())
        return -ENOSYS;
    if (!assign)
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0)
        return r;
    return 0;
#else
    return -ENOSYS;
#endif
}
);