/*
 * Copyright (C) 2006-2008 Qumranet Technologies
 *
 * Licensed under the terms of the GNU GPL version 2 or higher.
 */

#include "config-host.h"
#include "qemu-common.h"

#include <sys/utsname.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>

#define EXPECTED_KVM_API_VERSION 12

#if EXPECTED_KVM_API_VERSION != KVM_API_VERSION
#error libkvm: userspace and kernel version mismatch
#endif

int kvm_pit_reinject = 1;

kvm_context_t kvm_context;

pthread_mutex_t qemu_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t qemu_vcpu_cond = PTHREAD_COND_INITIALIZER;
pthread_cond_t qemu_system_cond = PTHREAD_COND_INITIALIZER;
pthread_cond_t qemu_pause_cond = PTHREAD_COND_INITIALIZER;
pthread_cond_t qemu_work_cond = PTHREAD_COND_INITIALIZER;
__thread CPUState *current_env;

static int qemu_system_ready;

#define SIG_IPI (SIGRTMIN+4)

static int io_thread_fd = -1;
static int io_thread_sigfd = -1;

static CPUState *kvm_debug_cpu_requested;

static uint64_t phys_ram_size;

/* The list of ioperm_data */
static LIST_HEAD(, ioperm_data) ioperm_head;

//#define DEBUG_MEMREG
#ifdef DEBUG_MEMREG
#define DPRINTF(fmt, args...) \
    do { fprintf(stderr, "%s:%d " fmt , __func__, __LINE__, ##args); } while (0)
#else
#define DPRINTF(fmt, args...) do {} while (0)
#endif

#define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))

int kvm_abi = EXPECTED_KVM_API_VERSION;
#ifdef KVM_CAP_SET_GUEST_DEBUG
static int kvm_debug(void *opaque, void *data,
                     struct kvm_debug_exit_arch *arch_info)
    int handle = kvm_arch_debug(arch_info);
        kvm_debug_cpu_requested = env;

int kvm_mmio_read(void *opaque, uint64_t addr, uint8_t *data, int len)
    cpu_physical_memory_rw(addr, data, len, 0);

int kvm_mmio_write(void *opaque, uint64_t addr, uint8_t *data, int len)
    cpu_physical_memory_rw(addr, data, len, 1);

static int handle_unhandled(uint64_t reason)
    fprintf(stderr, "kvm: unhandled exit %"PRIx64"\n", reason);
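
/*
 * GSI accounting: used_gsi_bitmap (allocated in kvm_init() below) keeps one
 * bit per GSI, packed into 32-bit words, so set_gsi()/clear_gsi() can mark
 * routes in use and kvm_get_irq_route_gsi() can later find a free one.
 */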
static inline void set_gsi(kvm_context_t kvm, unsigned int gsi)
{
    uint32_t *bitmap = kvm->used_gsi_bitmap;

    if (gsi < kvm->max_gsi)
        bitmap[gsi / 32] |= 1U << (gsi % 32);
    else
        DPRINTF("Invalid GSI %d\n");
}

static inline void clear_gsi(kvm_context_t kvm, unsigned int gsi)
{
    uint32_t *bitmap = kvm->used_gsi_bitmap;

    if (gsi < kvm->max_gsi)
        bitmap[gsi / 32] &= ~(1U << (gsi % 32));
    else
        DPRINTF("Invalid GSI %d\n");
}

    unsigned long phys_addr;
    unsigned long userspace_addr;

struct slot_info slots[KVM_MAX_NUM_MEM_REGIONS];

static void init_slots(void)
    for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS; ++i)

static int get_free_slot(kvm_context_t kvm)
#if defined(KVM_CAP_SET_TSS_ADDR) && !defined(__s390__)
    tss_ext = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);

    /*
     * on older kernels where the set tss ioctl is not supported we must save
     * slot 0 to hold the extended memory, as the vmx will use the last 3
     * pages of this slot.
     */
    for (; i < KVM_MAX_NUM_MEM_REGIONS; ++i)
static void register_slot(int slot, unsigned long phys_addr, unsigned long len,
                          unsigned long userspace_addr, unsigned flags)
{
    slots[slot].phys_addr = phys_addr;
    slots[slot].len = len;
    slots[slot].userspace_addr = userspace_addr;
    slots[slot].flags = flags;
}

static void free_slot(int slot)
    slots[slot].logging_count = 0;

static int get_slot(unsigned long phys_addr)
    for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS; ++i) {
        if (slots[i].len && slots[i].phys_addr <= phys_addr &&
            (slots[i].phys_addr + slots[i].len-1) >= phys_addr)

/* Returns -1 if this slot is not totally contained on any other,
 * and the number of the slot otherwise */
static int get_container_slot(uint64_t phys_addr, unsigned long size)
    for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS; ++i)
        if (slots[i].len && slots[i].phys_addr <= phys_addr &&
            (slots[i].phys_addr + slots[i].len) >= phys_addr + size)

int kvm_is_containing_region(kvm_context_t kvm, unsigned long phys_addr, unsigned long size)
    int slot = get_container_slot(phys_addr, size);
/*
 * dirty pages logging control
 */
static int kvm_dirty_pages_log_change(kvm_context_t kvm,
                                      unsigned long phys_addr,
    int slot = get_slot(phys_addr);

        fprintf(stderr, "BUG: %s: invalid parameters\n", __FUNCTION__);

    flags = (slots[slot].flags & ~mask) | flags;
    if (flags == slots[slot].flags)
    slots[slot].flags = flags;

        struct kvm_userspace_memory_region mem = {
            .memory_size = slots[slot].len,
            .guest_phys_addr = slots[slot].phys_addr,
            .userspace_addr = slots[slot].userspace_addr,
            .flags = slots[slot].flags,

        DPRINTF("slot %d start %llx len %llx flags %x\n",

        r = kvm_vm_ioctl(kvm_state, KVM_SET_USER_MEMORY_REGION, &mem);
            fprintf(stderr, "%s: %m\n", __FUNCTION__);

static int kvm_dirty_pages_log_change_all(kvm_context_t kvm,
                                          int (*change)(kvm_context_t kvm,
    for (i=r=0; i<KVM_MAX_NUM_MEM_REGIONS && r==0; i++) {
            r = change(kvm, slots[i].phys_addr, slots[i].len);

int kvm_dirty_pages_log_enable_slot(kvm_context_t kvm,
    int slot = get_slot(phys_addr);

    DPRINTF("start %"PRIx64" len %"PRIx64"\n", phys_addr, len);
        fprintf(stderr, "BUG: %s: invalid parameters\n", __func__);

    if (slots[slot].logging_count++)

    return kvm_dirty_pages_log_change(kvm, slots[slot].phys_addr,
                                      KVM_MEM_LOG_DIRTY_PAGES,
                                      KVM_MEM_LOG_DIRTY_PAGES);

int kvm_dirty_pages_log_disable_slot(kvm_context_t kvm,
    int slot = get_slot(phys_addr);

        fprintf(stderr, "BUG: %s: invalid parameters\n", __func__);

    if (--slots[slot].logging_count)

    return kvm_dirty_pages_log_change(kvm, slots[slot].phys_addr,
                                      KVM_MEM_LOG_DIRTY_PAGES);

/*
 * Enable dirty page logging for all memory regions
 */
int kvm_dirty_pages_log_enable_all(kvm_context_t kvm)
    if (kvm->dirty_pages_log_all)
    kvm->dirty_pages_log_all = 1;
    return kvm_dirty_pages_log_change_all(kvm,
                                          kvm_dirty_pages_log_enable_slot);

/*
 * Enable dirty page logging only for memory regions that were created with
 * dirty logging enabled (disable for all other memory regions).
 */
int kvm_dirty_pages_log_reset(kvm_context_t kvm)
    if (!kvm->dirty_pages_log_all)
    kvm->dirty_pages_log_all = 0;
    return kvm_dirty_pages_log_change_all(kvm,
                                          kvm_dirty_pages_log_disable_slot);
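
/*
 * kvm_init() opens /dev/kvm, verifies the KVM_GET_API_VERSION handshake
 * against EXPECTED_KVM_API_VERSION, and sizes the GSI bitmap (rounded up to
 * a multiple of 32 bits) before handing off to kvm_create_context().
 */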
static int kvm_create_context(void);

int kvm_init(int smp_cpus)
    fd = open("/dev/kvm", O_RDWR);
        perror("open /dev/kvm");

    r = ioctl(fd, KVM_GET_API_VERSION, 0);
        fprintf(stderr, "kvm kernel version too old: "
                "KVM_GET_API_VERSION ioctl not supported\n");

    if (r < EXPECTED_KVM_API_VERSION) {
        fprintf(stderr, "kvm kernel version too old: "
                "We expect API version %d or newer, but got "
                EXPECTED_KVM_API_VERSION, r);

    if (r > EXPECTED_KVM_API_VERSION) {
        fprintf(stderr, "kvm userspace version too old\n");

    kvm_page_size = getpagesize();
    kvm_state = qemu_mallocz(sizeof(*kvm_state));
    kvm_context = &kvm_state->kvm_context;

    kvm_state->vmfd = -1;
    kvm_context->opaque = cpu_single_env;
    kvm_context->dirty_pages_log_all = 0;
    kvm_context->no_irqchip_creation = 0;
    kvm_context->no_pit_creation = 0;

#ifdef KVM_CAP_SET_GUEST_DEBUG
    TAILQ_INIT(&kvm_state->kvm_sw_breakpoints);

    gsi_count = kvm_get_gsi_count(kvm_context);
        /* Round up so we can search ints using ffs */
        gsi_bits = ALIGN(gsi_count, 32);
        kvm_context->used_gsi_bitmap = qemu_mallocz(gsi_bits / 8);
        kvm_context->max_gsi = gsi_bits;

        /* Mark any over-allocated bits as already in use */
        for (i = gsi_count; i < gsi_bits; i++)
            set_gsi(kvm_context, i);

    pthread_mutex_lock(&qemu_mutex);
    return kvm_create_context();

static void kvm_finalize(KVMState *s)
    if (kvm->vcpu_fd[0] != -1)
        close(kvm->vcpu_fd[0]);
    if (kvm->vm_fd != -1)

void kvm_disable_irqchip_creation(kvm_context_t kvm)
{
    kvm->no_irqchip_creation = 1;
}

void kvm_disable_pit_creation(kvm_context_t kvm)
{
    kvm->no_pit_creation = 1;
}
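
/*
 * kvm_create_vcpu() asks the kernel for a new vcpu with KVM_CREATE_VCPU and
 * mmaps the shared kvm_run area; its size comes from KVM_GET_VCPU_MMAP_SIZE,
 * not from sizeof(struct kvm_run).
 */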
kvm_vcpu_context_t kvm_create_vcpu(CPUState *env, int id)
    kvm_vcpu_context_t vcpu_ctx = qemu_malloc(sizeof(struct kvm_vcpu_context));
    kvm_context_t kvm = kvm_context;

    r = kvm_vm_ioctl(kvm_state, KVM_CREATE_VCPU, id);
        fprintf(stderr, "kvm_create_vcpu: %m\n");

    env->kvm_state = kvm_state;

    mmap_size = kvm_ioctl(kvm_state, KVM_GET_VCPU_MMAP_SIZE, 0);
        fprintf(stderr, "get vcpu mmap size: %m\n");

    vcpu_ctx->run = mmap(NULL, mmap_size, PROT_READ|PROT_WRITE, MAP_SHARED,
    if (vcpu_ctx->run == MAP_FAILED) {
        fprintf(stderr, "mmap vcpu area: %m\n");

static int kvm_set_boot_vcpu_id(kvm_context_t kvm, uint32_t id)
#ifdef KVM_CAP_SET_BOOT_CPU_ID
    int r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_SET_BOOT_CPU_ID);
        return kvm_vm_ioctl(kvm_state, KVM_SET_BOOT_CPU_ID, id);

int kvm_create_vm(kvm_context_t kvm)
#ifdef KVM_CAP_IRQ_ROUTING
    kvm->irq_routes = qemu_mallocz(sizeof(*kvm->irq_routes));
    kvm->nr_allocated_irq_routes = 0;

    fd = kvm_ioctl(kvm_state, KVM_CREATE_VM, 0);
        fprintf(stderr, "kvm_create_vm: %m\n");

    kvm_state->vmfd = fd;

static int kvm_create_default_phys_mem(kvm_context_t kvm,
                                       unsigned long phys_mem_bytes,
#ifdef KVM_CAP_USER_MEMORY
    int r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY);
        fprintf(stderr, "Hypervisor too old: KVM_CAP_USER_MEMORY extension not supported\n");
#error Hypervisor too old: KVM_CAP_USER_MEMORY extension not supported

void kvm_create_irqchip(kvm_context_t kvm)
    kvm->irqchip_in_kernel = 0;
#ifdef KVM_CAP_IRQCHIP
    if (!kvm->no_irqchip_creation) {
        r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_IRQCHIP);
        if (r > 0) { /* kernel irqchip supported */
            r = kvm_vm_ioctl(kvm_state, KVM_CREATE_IRQCHIP);
                kvm->irqchip_inject_ioctl = KVM_IRQ_LINE;
#if defined(KVM_CAP_IRQ_INJECT_STATUS) && defined(KVM_IRQ_LINE_STATUS)
                r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION,
                              KVM_CAP_IRQ_INJECT_STATUS);
                    kvm->irqchip_inject_ioctl = KVM_IRQ_LINE_STATUS;
                kvm->irqchip_in_kernel = 1;
                fprintf(stderr, "Create kernel PIC irqchip failed\n");
int kvm_create(kvm_context_t kvm, unsigned long phys_mem_bytes, void **vm_mem)
    r = kvm_create_vm(kvm);
    r = kvm_arch_create(kvm, phys_mem_bytes, vm_mem);
    r = kvm_create_default_phys_mem(kvm, phys_mem_bytes, vm_mem);
    kvm_create_irqchip(kvm);

int kvm_register_phys_mem(kvm_context_t kvm,
                          unsigned long phys_start, void *userspace_addr,
                          unsigned long len, int log)
    struct kvm_userspace_memory_region memory = {
        .guest_phys_addr = phys_start,
        .userspace_addr = (unsigned long)(intptr_t)userspace_addr,
        .flags = log ? KVM_MEM_LOG_DIRTY_PAGES : 0,

    memory.slot = get_free_slot(kvm);
    DPRINTF("memory: gpa: %llx, size: %llx, uaddr: %llx, slot: %x, flags: %lx\n",
            memory.guest_phys_addr, memory.memory_size,
            memory.userspace_addr, memory.slot, memory.flags);
    r = kvm_vm_ioctl(kvm_state, KVM_SET_USER_MEMORY_REGION, &memory);
        fprintf(stderr, "create_userspace_phys_mem: %s\n", strerror(-r));
    register_slot(memory.slot, memory.guest_phys_addr, memory.memory_size,
                  memory.userspace_addr, memory.flags);

/* destroy/free a whole slot.
 * phys_start, len and slot are the params passed to kvm_create_phys_mem()
 */
void kvm_destroy_phys_mem(kvm_context_t kvm, unsigned long phys_start,
    struct kvm_userspace_memory_region memory = {
        .guest_phys_addr = phys_start,

    slot = get_slot(phys_start);

    if ((slot >= KVM_MAX_NUM_MEM_REGIONS) || (slot == -1)) {
        fprintf(stderr, "BUG: %s: invalid parameters (slot=%d)\n",
    if (phys_start != slots[slot].phys_addr) {
                "WARNING: %s: phys_start is 0x%lx expecting 0x%lx\n",
                __FUNCTION__, phys_start, slots[slot].phys_addr);
        phys_start = slots[slot].phys_addr;

    DPRINTF("slot %d start %llx len %llx flags %x\n",
            memory.guest_phys_addr,
    r = kvm_vm_ioctl(kvm_state, KVM_SET_USER_MEMORY_REGION, &memory);
        fprintf(stderr, "destroy_userspace_phys_mem: %s",
    free_slot(memory.slot);

void kvm_unregister_memory_area(kvm_context_t kvm, uint64_t phys_addr, unsigned long size)
    int slot = get_container_slot(phys_addr, size);

        DPRINTF("Unregistering memory region %llx (%lx)\n", phys_addr, size);
        kvm_destroy_phys_mem(kvm, phys_addr, size);
static int kvm_get_map(kvm_context_t kvm, int ioctl_num, int slot, void *buf)
    struct kvm_dirty_log log = {

    log.dirty_bitmap = buf;

    r = kvm_vm_ioctl(kvm_state, ioctl_num, &log);

int kvm_get_dirty_pages(kvm_context_t kvm, unsigned long phys_addr, void *buf)
    slot = get_slot(phys_addr);
    return kvm_get_map(kvm, KVM_GET_DIRTY_LOG, slot, buf);

int kvm_get_dirty_pages_range(kvm_context_t kvm, unsigned long phys_addr,
                              unsigned long len, void *opaque,
                              int (*cb)(unsigned long start, unsigned long len,
                                        void *bitmap, void *opaque))
    unsigned long end_addr = phys_addr + len;

    for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS; ++i) {
        if ((slots[i].len && (uint64_t)slots[i].phys_addr >= phys_addr)
            && ((uint64_t)slots[i].phys_addr + slots[i].len <= end_addr)) {
            buf = qemu_malloc((slots[i].len / 4096 + 7) / 8 + 2);
            r = kvm_get_map(kvm, KVM_GET_DIRTY_LOG, i, buf);
            r = cb(slots[i].phys_addr, slots[i].len, buf, opaque);
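
/*
 * In-kernel irqchip helpers: interrupt lines are raised and lowered through
 * kvm->irqchip_inject_ioctl (KVM_IRQ_LINE, or KVM_IRQ_LINE_STATUS when the
 * kernel can also report whether the interrupt was actually delivered).
 */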
#ifdef KVM_CAP_IRQCHIP

int kvm_set_irq_level(kvm_context_t kvm, int irq, int level, int *status)
    struct kvm_irq_level event;

    if (!kvm->irqchip_in_kernel)

    r = kvm_vm_ioctl(kvm_state, kvm->irqchip_inject_ioctl, &event);
        perror("kvm_set_irq_level");

#ifdef KVM_CAP_IRQ_INJECT_STATUS
        *status = (kvm->irqchip_inject_ioctl == KVM_IRQ_LINE) ?

int kvm_get_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip)
    if (!kvm->irqchip_in_kernel)
    r = kvm_vm_ioctl(kvm_state, KVM_GET_IRQCHIP, chip);
        perror("kvm_get_irqchip\n");

int kvm_set_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip)
    if (!kvm->irqchip_in_kernel)
    r = kvm_vm_ioctl(kvm_state, KVM_SET_IRQCHIP, chip);
        perror("kvm_set_irqchip\n");
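
/*
 * Programmed I/O exits: kvm_run->io describes port, size, direction and a
 * repeat count; the bytes to transfer live inside the shared kvm_run mapping
 * at run + run->io.data_offset.
 */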
static int handle_io(kvm_vcpu_context_t vcpu)
    struct kvm_run *run = vcpu->run;
    kvm_context_t kvm = vcpu->kvm;
    uint16_t addr = run->io.port;

    void *p = (void *)run + run->io.data_offset;

    for (i = 0; i < run->io.count; ++i) {
        switch (run->io.direction) {
            switch (run->io.size) {
                *(uint8_t *)p = cpu_inb(kvm->opaque, addr);
                *(uint16_t *)p = cpu_inw(kvm->opaque, addr);
                *(uint32_t *)p = cpu_inl(kvm->opaque, addr);
                fprintf(stderr, "bad I/O size %d\n", run->io.size);
        case KVM_EXIT_IO_OUT:
            switch (run->io.size) {
                cpu_outb(kvm->opaque, addr, *(uint8_t *)p);
                cpu_outw(kvm->opaque, addr, *(uint16_t *)p);
                cpu_outl(kvm->opaque, addr, *(uint32_t *)p);
                fprintf(stderr, "bad I/O size %d\n", run->io.size);
            fprintf(stderr, "bad I/O direction %d\n", run->io.direction);
int handle_debug(kvm_vcpu_context_t vcpu, void *env)
#ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_run *run = vcpu->run;
    kvm_context_t kvm = vcpu->kvm;

    return kvm_debug(kvm->opaque, env, &run->debug.arch);

int kvm_get_regs(kvm_vcpu_context_t vcpu, struct kvm_regs *regs)
{
    return ioctl(vcpu->fd, KVM_GET_REGS, regs);
}

int kvm_set_regs(kvm_vcpu_context_t vcpu, struct kvm_regs *regs)
{
    return ioctl(vcpu->fd, KVM_SET_REGS, regs);
}

int kvm_get_fpu(kvm_vcpu_context_t vcpu, struct kvm_fpu *fpu)
{
    return ioctl(vcpu->fd, KVM_GET_FPU, fpu);
}

int kvm_set_fpu(kvm_vcpu_context_t vcpu, struct kvm_fpu *fpu)
{
    return ioctl(vcpu->fd, KVM_SET_FPU, fpu);
}

int kvm_get_sregs(kvm_vcpu_context_t vcpu, struct kvm_sregs *sregs)
{
    return ioctl(vcpu->fd, KVM_GET_SREGS, sregs);
}

int kvm_set_sregs(kvm_vcpu_context_t vcpu, struct kvm_sregs *sregs)
{
    return ioctl(vcpu->fd, KVM_SET_SREGS, sregs);
}

#ifdef KVM_CAP_MP_STATE
int kvm_get_mpstate(kvm_vcpu_context_t vcpu, struct kvm_mp_state *mp_state)
    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_MP_STATE);
        return ioctl(vcpu->fd, KVM_GET_MP_STATE, mp_state);

int kvm_set_mpstate(kvm_vcpu_context_t vcpu, struct kvm_mp_state *mp_state)
    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_MP_STATE);
        return ioctl(vcpu->fd, KVM_SET_MP_STATE, mp_state);
static int handle_mmio(kvm_vcpu_context_t vcpu)
    unsigned long addr = vcpu->run->mmio.phys_addr;
    kvm_context_t kvm = vcpu->kvm;
    struct kvm_run *kvm_run = vcpu->run;
    void *data = kvm_run->mmio.data;

    /* hack: Red Hat 7.1 generates these weird accesses. */
    if ((addr > 0xa0000-4 && addr <= 0xa0000) && kvm_run->mmio.len == 3)

    if (kvm_run->mmio.is_write)
        return kvm_mmio_write(kvm->opaque, addr, data,
        return kvm_mmio_read(kvm->opaque, addr, data,

int handle_io_window(kvm_context_t kvm)

int handle_halt(kvm_vcpu_context_t vcpu)
{
    return kvm_arch_halt(vcpu->kvm->opaque, vcpu);
}

int handle_shutdown(kvm_context_t kvm, CPUState *env)
    /* stop the current vcpu from going back to guest mode */
    qemu_system_reset_request();

static inline void push_nmi(kvm_context_t kvm)
{
#ifdef KVM_CAP_USER_NMI
    kvm_arch_push_nmi(kvm->opaque);
#endif /* KVM_CAP_USER_NMI */
}

void post_kvm_run(kvm_context_t kvm, CPUState *env)
{
    pthread_mutex_lock(&qemu_mutex);
    kvm_arch_post_kvm_run(kvm->opaque, env);
}

int pre_kvm_run(kvm_context_t kvm, CPUState *env)
    kvm_arch_pre_kvm_run(kvm->opaque, env);

    pthread_mutex_unlock(&qemu_mutex);

int kvm_get_interrupt_flag(kvm_vcpu_context_t vcpu)
{
    return vcpu->run->if_flag;
}

int kvm_is_ready_for_interrupt_injection(kvm_vcpu_context_t vcpu)
{
    return vcpu->run->ready_for_interrupt_injection;
}
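
/*
 * One pass of the vcpu execution loop: optionally request an interrupt window
 * when the irqchip lives in userspace, drop qemu_mutex around the KVM_RUN
 * ioctl, drain any pending coalesced MMIO ring entries, then dispatch on
 * run->exit_reason.
 */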
int kvm_run(kvm_vcpu_context_t vcpu, void *env)
    struct kvm_run *run = vcpu->run;
    kvm_context_t kvm = vcpu->kvm;

#if !defined(__s390__)
    if (!kvm->irqchip_in_kernel)
        run->request_interrupt_window = kvm_arch_try_push_interrupts(env);

    r = pre_kvm_run(kvm, env);
    r = ioctl(fd, KVM_RUN, 0);

    if (r == -1 && errno != EINTR && errno != EAGAIN) {
        post_kvm_run(kvm, env);
        fprintf(stderr, "kvm_run: %s\n", strerror(-r));

    post_kvm_run(kvm, env);

#if defined(KVM_CAP_COALESCED_MMIO)
    if (kvm->coalesced_mmio) {
        struct kvm_coalesced_mmio_ring *ring = (void *)run +
            kvm->coalesced_mmio * PAGE_SIZE;
        while (ring->first != ring->last) {
            kvm_mmio_write(kvm->opaque,
                           ring->coalesced_mmio[ring->first].phys_addr,
                           &ring->coalesced_mmio[ring->first].data[0],
                           ring->coalesced_mmio[ring->first].len);
            ring->first = (ring->first + 1) %
                KVM_COALESCED_MMIO_MAX;

#if !defined(__s390__)
        r = handle_io_window(kvm);

        switch (run->exit_reason) {
        case KVM_EXIT_UNKNOWN:
            r = handle_unhandled(run->hw.hardware_exit_reason);
        case KVM_EXIT_FAIL_ENTRY:
            r = handle_unhandled(run->fail_entry.hardware_entry_failure_reason);
        case KVM_EXIT_EXCEPTION:
            fprintf(stderr, "exception %d (%x)\n",
                    run->ex.error_code);
            kvm_show_regs(vcpu);
            kvm_show_code(vcpu);
            r = handle_io(vcpu);
        case KVM_EXIT_DEBUG:
            r = handle_debug(vcpu, env);
            r = handle_mmio(vcpu);
            r = handle_halt(vcpu);
        case KVM_EXIT_IRQ_WINDOW_OPEN:
        case KVM_EXIT_SHUTDOWN:
            r = handle_shutdown(kvm, env);
#if defined(__s390__)
        case KVM_EXIT_S390_SIEIC:
            r = kvm_s390_handle_intercept(kvm, vcpu,
        case KVM_EXIT_S390_RESET:
            r = kvm_s390_handle_reset(kvm, vcpu, run);
            if (kvm_arch_run(vcpu)) {
                fprintf(stderr, "unhandled vm exit: 0x%x\n",
                kvm_show_regs(vcpu);
int kvm_inject_irq(kvm_vcpu_context_t vcpu, unsigned irq)
    struct kvm_interrupt intr;

    return ioctl(vcpu->fd, KVM_INTERRUPT, &intr);

#ifdef KVM_CAP_SET_GUEST_DEBUG
int kvm_set_guest_debug(kvm_vcpu_context_t vcpu, struct kvm_guest_debug *dbg)
{
    return ioctl(vcpu->fd, KVM_SET_GUEST_DEBUG, dbg);
}

int kvm_set_signal_mask(kvm_vcpu_context_t vcpu, const sigset_t *sigset)
    struct kvm_signal_mask *sigmask;

        r = ioctl(vcpu->fd, KVM_SET_SIGNAL_MASK, NULL);

    sigmask = qemu_malloc(sizeof(*sigmask) + sizeof(*sigset));

    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
    r = ioctl(vcpu->fd, KVM_SET_SIGNAL_MASK, sigmask);

int kvm_irqchip_in_kernel(kvm_context_t kvm)
{
    return kvm->irqchip_in_kernel;
}

int kvm_pit_in_kernel(kvm_context_t kvm)
{
    return kvm->pit_in_kernel;
}

int kvm_has_sync_mmu(void)
#ifdef KVM_CAP_SYNC_MMU
    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_SYNC_MMU);

int kvm_inject_nmi(kvm_vcpu_context_t vcpu)
#ifdef KVM_CAP_USER_NMI
    return ioctl(vcpu->fd, KVM_NMI);

int kvm_init_coalesced_mmio(kvm_context_t kvm)
    kvm->coalesced_mmio = 0;
#ifdef KVM_CAP_COALESCED_MMIO
    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_MMIO);
        kvm->coalesced_mmio = r;
int kvm_coalesce_mmio_region(target_phys_addr_t addr, ram_addr_t size)
#ifdef KVM_CAP_COALESCED_MMIO
    kvm_context_t kvm = kvm_context;
    struct kvm_coalesced_mmio_zone zone;

    if (kvm->coalesced_mmio) {
        r = kvm_vm_ioctl(kvm_state, KVM_REGISTER_COALESCED_MMIO, &zone);
            perror("kvm_register_coalesced_mmio_zone");

int kvm_uncoalesce_mmio_region(target_phys_addr_t addr, ram_addr_t size)
#ifdef KVM_CAP_COALESCED_MMIO
    kvm_context_t kvm = kvm_context;
    struct kvm_coalesced_mmio_zone zone;

    if (kvm->coalesced_mmio) {
        r = kvm_vm_ioctl(kvm_state, KVM_UNREGISTER_COALESCED_MMIO, &zone);
            perror("kvm_unregister_coalesced_mmio_zone");
        DPRINTF("Unregistered coalesced mmio region for %llx (%lx)\n", addr, size);

#ifdef KVM_CAP_DEVICE_ASSIGNMENT
int kvm_assign_pci_device(kvm_context_t kvm,
                          struct kvm_assigned_pci_dev *assigned_dev)
{
    return kvm_vm_ioctl(kvm_state, KVM_ASSIGN_PCI_DEVICE, assigned_dev);
}

static int kvm_old_assign_irq(kvm_context_t kvm,
                              struct kvm_assigned_irq *assigned_irq)
{
    return kvm_vm_ioctl(kvm_state, KVM_ASSIGN_IRQ, assigned_irq);
}

#ifdef KVM_CAP_ASSIGN_DEV_IRQ
int kvm_assign_irq(kvm_context_t kvm,
                   struct kvm_assigned_irq *assigned_irq)
    ret = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_ASSIGN_DEV_IRQ);
        return kvm_vm_ioctl(kvm_state, KVM_ASSIGN_DEV_IRQ, assigned_irq);

    return kvm_old_assign_irq(kvm, assigned_irq);

int kvm_deassign_irq(kvm_context_t kvm,
                     struct kvm_assigned_irq *assigned_irq)
{
    return kvm_vm_ioctl(kvm_state, KVM_DEASSIGN_DEV_IRQ, assigned_irq);
}
#else
int kvm_assign_irq(kvm_context_t kvm,
                   struct kvm_assigned_irq *assigned_irq)
{
    return kvm_old_assign_irq(kvm, assigned_irq);
}

#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
int kvm_deassign_pci_device(kvm_context_t kvm,
                            struct kvm_assigned_pci_dev *assigned_dev)
{
    return kvm_vm_ioctl(kvm_state, KVM_DEASSIGN_PCI_DEVICE, assigned_dev);
}
int kvm_destroy_memory_region_works(kvm_context_t kvm)
#ifdef KVM_CAP_DESTROY_MEMORY_REGION_WORKS
    ret = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION,
                    KVM_CAP_DESTROY_MEMORY_REGION_WORKS);

int kvm_reinject_control(kvm_context_t kvm, int pit_reinject)
#ifdef KVM_CAP_REINJECT_CONTROL
    struct kvm_reinject_control control;

    control.pit_reinject = pit_reinject;

    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_REINJECT_CONTROL);
        return kvm_vm_ioctl(kvm_state, KVM_REINJECT_CONTROL, &control);

int kvm_has_gsi_routing(kvm_context_t kvm)
#ifdef KVM_CAP_IRQ_ROUTING
    r = kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);

int kvm_get_gsi_count(kvm_context_t kvm)
#ifdef KVM_CAP_IRQ_ROUTING
    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);

int kvm_clear_gsi_routes(kvm_context_t kvm)
#ifdef KVM_CAP_IRQ_ROUTING
    kvm->irq_routes->nr = 0;
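
/*
 * The userspace copy of the IRQ routing table grows by doubling
 * nr_allocated_irq_routes; entries added or deleted here only reach the
 * kernel when kvm_commit_irq_routes() pushes the whole table with
 * KVM_SET_GSI_ROUTING.
 */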
int kvm_add_routing_entry(kvm_context_t kvm,
                          struct kvm_irq_routing_entry *entry)
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing *z;
    struct kvm_irq_routing_entry *new;

    if (kvm->irq_routes->nr == kvm->nr_allocated_irq_routes) {
        n = kvm->nr_allocated_irq_routes * 2;
        size = sizeof(struct kvm_irq_routing);
        size += n * sizeof(*new);
        z = realloc(kvm->irq_routes, size);
        kvm->nr_allocated_irq_routes = n;
        kvm->irq_routes = z;

    n = kvm->irq_routes->nr++;
    new = &kvm->irq_routes->entries[n];
    memset(new, 0, sizeof(*new));
    new->gsi = entry->gsi;
    new->type = entry->type;
    new->flags = entry->flags;

    set_gsi(kvm, entry->gsi);

int kvm_add_irq_route(kvm_context_t kvm, int gsi, int irqchip, int pin)
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing_entry e;

    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    return kvm_add_routing_entry(kvm, &e);
int kvm_del_routing_entry(kvm_context_t kvm,
                          struct kvm_irq_routing_entry *entry)
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing_entry *e, *p;
    int i, gsi, found = 0;

    for (i = 0; i < kvm->irq_routes->nr; ++i) {
        e = &kvm->irq_routes->entries[i];
        if (e->type == entry->type
        case KVM_IRQ_ROUTING_IRQCHIP: {
            if (e->u.irqchip.irqchip == entry->u.irqchip.irqchip
                && e->u.irqchip.pin == entry->u.irqchip.pin) {
                p = &kvm->irq_routes->entries[--kvm->irq_routes->nr];
        case KVM_IRQ_ROUTING_MSI: {
            if (e->u.msi.address_lo == entry->u.msi.address_lo
                && e->u.msi.address_hi == entry->u.msi.address_hi
                && e->u.msi.data == entry->u.msi.data) {
                p = &kvm->irq_routes->entries[--kvm->irq_routes->nr];

    /* If there are no other users of this GSI
     * mark it available in the bitmap */
    for (i = 0; i < kvm->irq_routes->nr; i++) {
        e = &kvm->irq_routes->entries[i];
    if (i == kvm->irq_routes->nr)
        clear_gsi(kvm, gsi);
int kvm_update_routing_entry(kvm_context_t kvm,
                             struct kvm_irq_routing_entry *entry,
                             struct kvm_irq_routing_entry *newentry)
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing_entry *e;

    if (entry->gsi != newentry->gsi ||
        entry->type != newentry->type) {

    for (i = 0; i < kvm->irq_routes->nr; ++i) {
        e = &kvm->irq_routes->entries[i];
        if (e->type != entry->type || e->gsi != entry->gsi) {
        case KVM_IRQ_ROUTING_IRQCHIP:
            if (e->u.irqchip.irqchip == entry->u.irqchip.irqchip &&
                e->u.irqchip.pin == entry->u.irqchip.pin) {
                memcpy(&e->u.irqchip, &newentry->u.irqchip, sizeof e->u.irqchip);
        case KVM_IRQ_ROUTING_MSI:
            if (e->u.msi.address_lo == entry->u.msi.address_lo &&
                e->u.msi.address_hi == entry->u.msi.address_hi &&
                e->u.msi.data == entry->u.msi.data) {
                memcpy(&e->u.msi, &newentry->u.msi, sizeof e->u.msi);
int kvm_del_irq_route(kvm_context_t kvm, int gsi, int irqchip, int pin)
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing_entry e;

    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    return kvm_del_routing_entry(kvm, &e);

int kvm_commit_irq_routes(kvm_context_t kvm)
#ifdef KVM_CAP_IRQ_ROUTING
    kvm->irq_routes->flags = 0;
    return kvm_vm_ioctl(kvm_state, KVM_SET_GSI_ROUTING, kvm->irq_routes);

int kvm_get_irq_route_gsi(kvm_context_t kvm)
    uint32_t *buf = kvm->used_gsi_bitmap;

    /* Return the lowest unused GSI in the bitmap */
    for (i = 0; i < kvm->max_gsi / 32; i++) {
            return bit - 1 + i * 32;

#ifdef KVM_CAP_DEVICE_MSIX
int kvm_assign_set_msix_nr(kvm_context_t kvm,
                           struct kvm_assigned_msix_nr *msix_nr)
{
    return kvm_vm_ioctl(kvm_state, KVM_ASSIGN_SET_MSIX_NR, msix_nr);
}

int kvm_assign_set_msix_entry(kvm_context_t kvm,
                              struct kvm_assigned_msix_entry *entry)
{
    return kvm_vm_ioctl(kvm_state, KVM_ASSIGN_SET_MSIX_ENTRY, entry);
}
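
/*
 * irqfd: KVM_IRQFD binds an eventfd to a GSI so that signalling the eventfd
 * injects that interrupt without a round trip through userspace; kvm_irqfd()
 * checks KVM_CAP_IRQFD first and simply fails when the capability (or eventfd
 * support) is missing.
 */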
#if defined(KVM_CAP_IRQFD) && defined(CONFIG_eventfd)

#include <sys/eventfd.h>

static int _kvm_irqfd(kvm_context_t kvm, int fd, int gsi, int flags)
    struct kvm_irqfd data = {

    return kvm_vm_ioctl(kvm_state, KVM_IRQFD, &data);

int kvm_irqfd(kvm_context_t kvm, int gsi, int flags)
    if (!kvm_check_extension(kvm_state, KVM_CAP_IRQFD))

    r = _kvm_irqfd(kvm, fd, gsi, 0);

#else /* KVM_CAP_IRQFD */

int kvm_irqfd(kvm_context_t kvm, int gsi, int flags)

#endif /* KVM_CAP_IRQFD */

static inline unsigned long kvm_get_thread_id(void)
{
    return syscall(SYS_gettid);
}

static void qemu_cond_wait(pthread_cond_t *cond)
    CPUState *env = cpu_single_env;
    static const struct timespec ts = {

    pthread_cond_timedwait(cond, &qemu_mutex, &ts);
    cpu_single_env = env;
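
/*
 * on_vcpu() runs func(data) on the thread that owns env: directly when called
 * from that vcpu's own thread, otherwise by queueing a qemu_work_item, kicking
 * the target thread with SIG_IPI and sleeping on qemu_work_cond until
 * flush_queued_work() has run the item.
 */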
static void sig_ipi_handler(int n)

static void on_vcpu(CPUState *env, void (*func)(void *data), void *data)
    struct qemu_work_item wi;

    if (env == current_env) {

    if (!env->kvm_cpu_state.queued_work_first)
        env->kvm_cpu_state.queued_work_first = &wi;
    else
        env->kvm_cpu_state.queued_work_last->next = &wi;
    env->kvm_cpu_state.queued_work_last = &wi;

    pthread_kill(env->kvm_cpu_state.thread, SIG_IPI);

        qemu_cond_wait(&qemu_work_cond);

static void inject_interrupt(void *data)
{
    cpu_interrupt(current_env, (long)data);
}

void kvm_inject_interrupt(CPUState *env, int mask)
{
    on_vcpu(env, inject_interrupt, (void *)(long)mask);
}

void kvm_update_interrupt_request(CPUState *env)
    if (!current_env || !current_env->created)

    /*
     * Testing for created here is really redundant
     */
    if (current_env && current_env->created &&
        env != current_env && !env->kvm_cpu_state.signalled)

        env->kvm_cpu_state.signalled = 1;
        if (env->kvm_cpu_state.thread)
            pthread_kill(env->kvm_cpu_state.thread, SIG_IPI);
static void kvm_do_load_registers(void *_env)
{
    CPUState *env = _env;

    kvm_arch_load_regs(env);
}

void kvm_load_registers(CPUState *env)
{
    if (kvm_enabled() && qemu_system_ready)
        on_vcpu(env, kvm_do_load_registers, env);
}

static void kvm_do_save_registers(void *_env)
{
    CPUState *env = _env;

    kvm_arch_save_regs(env);
}

void kvm_save_registers(CPUState *env)
        on_vcpu(env, kvm_do_save_registers, env);

static void kvm_do_load_mpstate(void *_env)
{
    CPUState *env = _env;

    kvm_arch_load_mpstate(env);
}

void kvm_load_mpstate(CPUState *env)
{
    if (kvm_enabled() && qemu_system_ready)
        on_vcpu(env, kvm_do_load_mpstate, env);
}

static void kvm_do_save_mpstate(void *_env)
{
    CPUState *env = _env;

    kvm_arch_save_mpstate(env);
    env->halted = (env->mp_state == KVM_MP_STATE_HALTED);
}

void kvm_save_mpstate(CPUState *env)
        on_vcpu(env, kvm_do_save_mpstate, env);

int kvm_cpu_exec(CPUState *env)
    r = kvm_run(env->kvm_cpu_state.vcpu_ctx, env);
        printf("kvm_run returned %d\n", r);

static int is_cpu_stopped(CPUState *env)
{
    return !vm_running || env->stopped;
}

static void flush_queued_work(CPUState *env)
    struct qemu_work_item *wi;

    if (!env->kvm_cpu_state.queued_work_first)

    while ((wi = env->kvm_cpu_state.queued_work_first)) {
        env->kvm_cpu_state.queued_work_first = wi->next;

    env->kvm_cpu_state.queued_work_last = NULL;
    pthread_cond_broadcast(&qemu_work_cond);
static void kvm_main_loop_wait(CPUState *env, int timeout)
    pthread_mutex_unlock(&qemu_mutex);

    ts.tv_sec = timeout / 1000;
    ts.tv_nsec = (timeout % 1000) * 1000000;
    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    r = sigtimedwait(&waitset, &siginfo, &ts);

    pthread_mutex_lock(&qemu_mutex);

    if (r == -1 && !(e == EAGAIN || e == EINTR)) {
        printf("sigtimedwait: %s\n", strerror(e));

    cpu_single_env = env;
    flush_queued_work(env);

        pthread_cond_signal(&qemu_pause_cond);

    env->kvm_cpu_state.signalled = 0;
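
/*
 * Pausing stops every vcpu thread except the caller by sending SIG_IPI and
 * waiting on qemu_pause_cond until all_threads_paused() reports them stopped;
 * resume_all_threads() kicks the threads again with the same signal.
 */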
static int all_threads_paused(void)
    CPUState *penv = first_cpu;

        penv = (CPUState *)penv->next_cpu;

static void pause_all_threads(void)
    CPUState *penv = first_cpu;

        if (penv != cpu_single_env) {
            pthread_kill(penv->kvm_cpu_state.thread, SIG_IPI);
        penv = (CPUState *)penv->next_cpu;

    while (!all_threads_paused())
        qemu_cond_wait(&qemu_pause_cond);

static void resume_all_threads(void)
    CPUState *penv = first_cpu;

    assert(!cpu_single_env);

        pthread_kill(penv->kvm_cpu_state.thread, SIG_IPI);
        penv = (CPUState *)penv->next_cpu;

static void kvm_vm_state_change_handler(void *context, int running, int reason)
        resume_all_threads();
        pause_all_threads();

static void setup_kernel_sigmask(CPUState *env)
    sigaddset(&set, SIGUSR2);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigprocmask(SIG_BLOCK, &set, NULL);

    sigprocmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);

    kvm_set_signal_mask(env->kvm_cpu_state.vcpu_ctx, &set);

static void qemu_kvm_system_reset(void)
    CPUState *penv = first_cpu;

    pause_all_threads();

    qemu_system_reset();

        kvm_arch_cpu_reset(penv);
        penv = (CPUState *)penv->next_cpu;

    resume_all_threads();

static void process_irqchip_events(CPUState *env)
    kvm_arch_process_irqchip_events(env);
    if (kvm_arch_has_work(env))
static int kvm_main_loop_cpu(CPUState *env)
    setup_kernel_sigmask(env);

    pthread_mutex_lock(&qemu_mutex);

    kvm_qemu_init_env(env);
    kvm_tpr_vcpu_start(env);

    cpu_single_env = env;
    kvm_arch_load_regs(env);

        int run_cpu = !is_cpu_stopped(env);
        if (run_cpu && !kvm_irqchip_in_kernel(kvm_context)) {
            process_irqchip_events(env);
            run_cpu = !env->halted;
            kvm_main_loop_wait(env, 0);
            kvm_main_loop_wait(env, 1000);

    pthread_mutex_unlock(&qemu_mutex);

static void *ap_main_loop(void *_env)
    CPUState *env = _env;
    struct ioperm_data *data = NULL;

    env->thread_id = kvm_get_thread_id();
    sigfillset(&signals);
    sigprocmask(SIG_BLOCK, &signals, NULL);
    env->kvm_cpu_state.vcpu_ctx = kvm_create_vcpu(env, env->cpu_index);

#ifdef USE_KVM_DEVICE_ASSIGNMENT
    /* do ioperm for io ports of assigned devices */
    LIST_FOREACH(data, &ioperm_head, entries)
        on_vcpu(env, kvm_arch_do_ioperm, data);

    /* signal VCPU creation */
    pthread_mutex_lock(&qemu_mutex);
    current_env->created = 1;
    pthread_cond_signal(&qemu_vcpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready)
        qemu_cond_wait(&qemu_system_cond);
    pthread_mutex_unlock(&qemu_mutex);

    kvm_main_loop_cpu(env);
void kvm_init_vcpu(CPUState *env)
{
    pthread_create(&env->kvm_cpu_state.thread, NULL, ap_main_loop, env);

    while (env->created == 0)
        qemu_cond_wait(&qemu_vcpu_cond);
}

int kvm_vcpu_inited(CPUState *env)
{
    return env->created;
}

void kvm_hpet_disable_kpit(void)
{
    struct kvm_pit_state2 ps2;

    kvm_get_pit2(kvm_context, &ps2);
    ps2.flags |= KVM_PIT_FLAGS_HPET_LEGACY;
    kvm_set_pit2(kvm_context, &ps2);
}

void kvm_hpet_enable_kpit(void)
{
    struct kvm_pit_state2 ps2;

    kvm_get_pit2(kvm_context, &ps2);
    ps2.flags &= ~KVM_PIT_FLAGS_HPET_LEGACY;
    kvm_set_pit2(kvm_context, &ps2);
}

int kvm_init_ap(void)
    kvm_tpr_opt_setup();
    qemu_add_vm_change_state_handler(kvm_vm_state_change_handler, NULL);

    signal(SIG_IPI, sig_ipi_handler);

void qemu_kvm_notify_work(void)
    if (io_thread_fd == -1)

    memcpy(buffer, &value, sizeof(value));

    while (offset < 8) {
        len = write(io_thread_fd, buffer + offset, 8 - offset);
        if (len == -1 && errno == EINTR)

        /* In case we have a pipe, there is no reason to insist on writing
         * 8 bytes
         */
        if (len == -1 && errno == EAGAIN)
/* If we have signalfd, we mask out the signals we want to handle and then
 * use signalfd to listen for them.  We rely on whatever the current signal
 * handler is to dispatch the signals when we receive them.
 */
static void sigfd_handler(void *opaque)
    int fd = (unsigned long)opaque;
    struct qemu_signalfd_siginfo info;
    struct sigaction action;

        len = read(fd, &info, sizeof(info));
    } while (len == -1 && errno == EINTR);

    if (len == -1 && errno == EAGAIN)

    if (len != sizeof(info)) {
        printf("read from sigfd returned %zd: %m\n", len);

    sigaction(info.ssi_signo, NULL, &action);
    if (action.sa_handler)
        action.sa_handler(info.ssi_signo);

/* Used to break IO thread out of select */
static void io_thread_wakeup(void *opaque)
    int fd = (unsigned long)opaque;

    /* Drain the pipe/(eventfd) */
        len = read(fd, buffer, sizeof(buffer));
        if (len == -1 && errno == EINTR)
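
/*
 * The io thread: kvm_main_loop() registers an eventfd (io_thread_fd) so vcpu
 * threads can wake it, routes SIGIO/SIGALRM through a signalfd, and then
 * spins in main_loop_wait() dispatching shutdown, powerdown, reset and
 * guest-debug requests.
 */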
int kvm_main_loop(void)
    io_thread = pthread_self();
    qemu_system_ready = 1;

    if (qemu_eventfd(fds) == -1) {
        fprintf(stderr, "failed to create eventfd\n");

    fcntl(fds[0], F_SETFL, O_NONBLOCK);
    fcntl(fds[1], F_SETFL, O_NONBLOCK);

    qemu_set_fd_handler2(fds[0], NULL, io_thread_wakeup, NULL,
                         (void *)(unsigned long)fds[0]);

    io_thread_fd = fds[1];

    sigaddset(&mask, SIGIO);
    sigaddset(&mask, SIGALRM);
    sigprocmask(SIG_BLOCK, &mask, NULL);

    sigfd = qemu_signalfd(&mask);
        fprintf(stderr, "failed to create signalfd\n");

    fcntl(sigfd, F_SETFL, O_NONBLOCK);

    qemu_set_fd_handler2(sigfd, NULL, sigfd_handler, NULL,
                         (void *)(unsigned long)sigfd);

    pthread_cond_broadcast(&qemu_system_cond);

    io_thread_sigfd = sigfd;
    cpu_single_env = NULL;

        main_loop_wait(1000);
        if (qemu_shutdown_requested()) {
            if (qemu_no_shutdown()) {
        } else if (qemu_powerdown_requested())
            qemu_system_powerdown();
        else if (qemu_reset_requested())
            qemu_kvm_system_reset();
        else if (kvm_debug_cpu_requested) {
            gdb_set_stop_cpu(kvm_debug_cpu_requested);
            vm_stop(EXCP_DEBUG);
            kvm_debug_cpu_requested = NULL;

    pause_all_threads();
    pthread_mutex_unlock(&qemu_mutex);
static int destroy_region_works = 0;

#if !defined(TARGET_I386)
int kvm_arch_init_irq_routing(void)

static int kvm_create_context()
        kvm_disable_irqchip_creation(kvm_context);
        kvm_disable_pit_creation(kvm_context);

    if (kvm_create(kvm_context, 0, NULL) < 0) {
        kvm_finalize(kvm_state);

    r = kvm_arch_qemu_create_context();
        kvm_finalize(kvm_state);
    if (kvm_pit && !kvm_pit_reinject) {
        if (kvm_reinject_control(kvm_context, 0)) {
            fprintf(stderr, "failure to disable in-kernel PIT reinjection\n");

    destroy_region_works = kvm_destroy_memory_region_works(kvm_context);

    r = kvm_arch_init_irq_routing();

    if (!qemu_kvm_has_gsi_routing()) {
        /* if kernel can't do irq routing, interrupt source
         * override 0->2 can not be set up as required by hpet,
    } else if (!qemu_kvm_has_pit_state2()) {

static int must_use_aliases_source(target_phys_addr_t addr)
    if (destroy_region_works)
    if (addr == 0xa0000 || addr == 0xa8000)

static int must_use_aliases_target(target_phys_addr_t addr)
    if (destroy_region_works)
    if (addr >= 0xe0000000 && addr < 0x100000000ull)

static struct mapping {
    target_phys_addr_t phys;

static int nr_mappings;

static struct mapping *find_ram_mapping(ram_addr_t ram_addr)
    for (p = mappings; p < mappings + nr_mappings; ++p) {
        if (p->ram <= ram_addr && ram_addr < p->ram + p->len) {

static struct mapping *find_mapping(target_phys_addr_t start_addr)
    for (p = mappings; p < mappings + nr_mappings; ++p) {
        if (p->phys <= start_addr && start_addr < p->phys + p->len) {

static void drop_mapping(target_phys_addr_t start_addr)
    struct mapping *p = find_mapping(start_addr);

        *p = mappings[--nr_mappings];
void kvm_set_phys_mem(target_phys_addr_t start_addr, ram_addr_t size,
                      ram_addr_t phys_offset)
    unsigned long area_flags;

    if (start_addr + size > phys_ram_size) {
        phys_ram_size = start_addr + size;

    phys_offset &= ~IO_MEM_ROM;
    area_flags = phys_offset & ~TARGET_PAGE_MASK;

    if (area_flags != IO_MEM_RAM) {
        if (must_use_aliases_source(start_addr)) {
            kvm_destroy_memory_alias(kvm_context, start_addr);
        if (must_use_aliases_target(start_addr))

        p = find_mapping(start_addr);
            kvm_unregister_memory_area(kvm_context, p->phys, p->len);
            drop_mapping(p->phys);
        start_addr += TARGET_PAGE_SIZE;
        if (size > TARGET_PAGE_SIZE) {
            size -= TARGET_PAGE_SIZE;

    r = kvm_is_containing_region(kvm_context, start_addr, size);

    if (area_flags >= TLB_MMIO)

    if (must_use_aliases_source(start_addr)) {
        p = find_ram_mapping(phys_offset);
            kvm_create_memory_alias(kvm_context, start_addr, size,
                                    p->phys + (phys_offset - p->ram));

    r = kvm_register_phys_mem(kvm_context, start_addr,
                              qemu_get_ram_ptr(phys_offset),
        printf("kvm_cpu_register_physical_memory: failed\n");

    drop_mapping(start_addr);
    p = &mappings[nr_mappings++];
    p->phys = start_addr;
    p->ram = phys_offset;
int kvm_setup_guest_memory(void *area, unsigned long size)
#ifdef MADV_DONTFORK
    if (kvm_enabled() && !kvm_has_sync_mmu())
        ret = madvise(area, size, MADV_DONTFORK);

int kvm_qemu_check_extension(int ext)
{
    return kvm_check_extension(kvm_state, ext);
}

int kvm_qemu_init_env(CPUState *cenv)
{
    return kvm_arch_qemu_init_env(cenv);
}

#ifdef KVM_CAP_SET_GUEST_DEBUG

struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;

static void kvm_invoke_set_guest_debug(void *data)
    struct kvm_set_guest_debug_data *dbg_data = data;

    dbg_data->err = kvm_set_guest_debug(cpu_single_env->kvm_cpu_state.vcpu_ctx,

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
    struct kvm_set_guest_debug_data data;

    data.dbg.control = 0;
    if (env->singlestep_enabled)
        data.dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;

    kvm_arch_update_guest_debug(env, &data.dbg);
    data.dbg.control |= reinject_trap;

    on_vcpu(env, kvm_invoke_set_guest_debug, &data);
/*
 * dirty pages logging
 */
/* FIXME: use unsigned long pointer instead of unsigned char */
unsigned char *kvm_dirty_bitmap = NULL;

int kvm_physical_memory_set_dirty_tracking(int enable)
        if (!kvm_dirty_bitmap) {
            unsigned bitmap_size = BITMAP_SIZE(phys_ram_size);
            kvm_dirty_bitmap = qemu_malloc(bitmap_size);
            if (kvm_dirty_bitmap == NULL) {
                perror("Failed to allocate dirty pages bitmap");
                r = kvm_dirty_pages_log_enable_all(kvm_context);

        if (kvm_dirty_bitmap) {
            r = kvm_dirty_pages_log_reset(kvm_context);
            qemu_free(kvm_dirty_bitmap);
            kvm_dirty_bitmap = NULL;

/* get kvm's dirty pages bitmap and update qemu's */
static int kvm_get_dirty_pages_log_range(unsigned long start_addr,
                                         unsigned char *bitmap,
                                         unsigned long offset,
                                         unsigned long mem_size)
    unsigned int i, j, n = 0;
    unsigned long page_number, addr, addr1;
    ram_addr_t ram_addr;
    unsigned int len = ((mem_size / TARGET_PAGE_SIZE) + 7) / 8;

    /*
     * bitmap-traveling is faster than memory-traveling (for addr...)
     * especially when most of the memory is not dirty.
     */
    for (i = 0; i < len; i++) {
            page_number = i * 8 + j;
            addr1 = page_number * TARGET_PAGE_SIZE;
            addr = offset + addr1;
            ram_addr = cpu_get_physical_page_desc(addr);
            cpu_physical_memory_set_dirty(ram_addr);
static int kvm_get_dirty_bitmap_cb(unsigned long start, unsigned long len,
                                   void *bitmap, void *opaque)
{
    return kvm_get_dirty_pages_log_range(start, bitmap, start, len);
}

/*
 * get kvm's dirty pages bitmap and update qemu's
 * we only care about physical ram, which resides in slots 0 and 3
 */
int kvm_update_dirty_pages_log(void)
    r = kvm_get_dirty_pages_range(kvm_context, 0, -1UL,
                                  kvm_get_dirty_bitmap_cb);

void kvm_qemu_log_memory(target_phys_addr_t start, target_phys_addr_t size,
        kvm_dirty_pages_log_enable_slot(kvm_context, start, size);
        if (must_use_aliases_target(start))
        kvm_dirty_pages_log_disable_slot(kvm_context, start, size);
int kvm_get_phys_ram_page_bitmap(unsigned char *bitmap)
    unsigned int bsize = BITMAP_SIZE(phys_ram_size);
    unsigned int brsize = BITMAP_SIZE(ram_size);
    unsigned int extra_pages = (phys_ram_size - ram_size) / TARGET_PAGE_SIZE;
    unsigned int extra_bytes = (extra_pages + 7) / 8;
    unsigned int hole_start = BITMAP_SIZE(0xa0000);
    unsigned int hole_end = BITMAP_SIZE(0xc0000);

    memset(bitmap, 0xFF, brsize + extra_bytes);
    memset(bitmap + hole_start, 0, hole_end - hole_start);
    memset(bitmap + brsize + extra_bytes, 0, bsize - brsize - extra_bytes);

#ifdef KVM_CAP_IRQCHIP

int kvm_set_irq(int irq, int level, int *status)
{
    return kvm_set_irq_level(kvm_context, irq, level, status);
}

int qemu_kvm_get_dirty_pages(unsigned long phys_addr, void *buf)
{
    return kvm_get_dirty_pages(kvm_context, phys_addr, buf);
}

void kvm_mutex_unlock(void)
{
    assert(!cpu_single_env);
    pthread_mutex_unlock(&qemu_mutex);
}

void kvm_mutex_lock(void)
{
    pthread_mutex_lock(&qemu_mutex);
    cpu_single_env = NULL;
}

#ifdef USE_KVM_DEVICE_ASSIGNMENT
void kvm_add_ioperm_data(struct ioperm_data *data)
{
    LIST_INSERT_HEAD(&ioperm_head, data, entries);
}

void kvm_remove_ioperm_data(unsigned long start_port, unsigned long num)
    struct ioperm_data *data;

    data = LIST_FIRST(&ioperm_head);
        struct ioperm_data *next = LIST_NEXT(data, entries);

        if (data->start_port == start_port && data->num == num) {
            LIST_REMOVE(data, entries);

void kvm_ioperm(CPUState *env, void *data)
{
    if (kvm_enabled() && qemu_system_ready)
        on_vcpu(env, kvm_arch_do_ioperm, data);
}
int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
    if (must_use_aliases_source(start_addr))

    kvm_get_dirty_pages_range(kvm_context, start_addr, end_addr - start_addr,
                              NULL, kvm_get_dirty_bitmap_cb);

int kvm_log_start(target_phys_addr_t phys_addr, target_phys_addr_t len)
    if (must_use_aliases_source(phys_addr))

    kvm_qemu_log_memory(phys_addr, len, 1);

int kvm_log_stop(target_phys_addr_t phys_addr, target_phys_addr_t len)
    if (must_use_aliases_source(phys_addr))

    kvm_qemu_log_memory(phys_addr, len, 0);

int kvm_set_boot_cpu_id(uint32_t id)
{
    return kvm_set_boot_vcpu_id(kvm_context, id);
}

struct kvm_x86_mce_data
    struct kvm_x86_mce *mce;

static void kvm_do_inject_x86_mce(void *_data)
    struct kvm_x86_mce_data *data = _data;

    r = kvm_set_mce(data->env->kvm_cpu_state.vcpu_ctx, data->mce);
        perror("kvm_set_mce FAILED");

void kvm_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc)
    struct kvm_x86_mce mce = {
        .mcg_status = mcg_status,

    struct kvm_x86_mce_data data = {

    on_vcpu(cenv, kvm_do_inject_x86_mce, &data);