/*
 * Copyright (C) 2006-2008 Qumranet Technologies
 *
 * Licensed under the terms of the GNU GPL version 2 or higher.
 */

#include "config-host.h"
#include "qemu-common.h"
#include "qemu-kvm.h"

#include <pthread.h>
#include <sys/utsname.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef BUS_MCEERR_AR
#define BUS_MCEERR_AR 4
#endif
#ifndef BUS_MCEERR_AO
#define BUS_MCEERR_AO 5
#endif

#define EXPECTED_KVM_API_VERSION 12

#if EXPECTED_KVM_API_VERSION != KVM_API_VERSION
#error libkvm: userspace and kernel version mismatch
#endif
int kvm_pit_reinject = 1;

kvm_context_t kvm_context;

pthread_mutex_t qemu_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t qemu_vcpu_cond = PTHREAD_COND_INITIALIZER;
pthread_cond_t qemu_system_cond = PTHREAD_COND_INITIALIZER;
pthread_cond_t qemu_pause_cond = PTHREAD_COND_INITIALIZER;
pthread_cond_t qemu_work_cond = PTHREAD_COND_INITIALIZER;
__thread CPUState *current_env;

static int qemu_system_ready;

#define SIG_IPI (SIGRTMIN+4)

static int io_thread_fd = -1;
static int io_thread_sigfd = -1;

static CPUState *kvm_debug_cpu_requested;

static uint64_t phys_ram_size;
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
/* The list of ioperm_data */
static QLIST_HEAD(, ioperm_data) ioperm_head;
#endif

//#define DEBUG_MEMREG
#ifdef DEBUG_MEMREG
#define DPRINTF(fmt, args...) \
    do { fprintf(stderr, "%s:%d " fmt, __func__, __LINE__, ##args); } while (0)
#else
#define DPRINTF(fmt, args...) do {} while (0)
#endif
#define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))
#define BITMAP_SIZE(m) (ALIGN(((m)>>TARGET_PAGE_BITS), HOST_LONG_BITS) / 8)

int kvm_abi = EXPECTED_KVM_API_VERSION;
int kvm_page_size;

#ifdef KVM_CAP_SET_GUEST_DEBUG
static int kvm_debug(CPUState *env,
                     struct kvm_debug_exit_arch *arch_info)
{
    int handle = kvm_arch_debug(arch_info);

    if (handle) {
        kvm_debug_cpu_requested = env;
        env->stopped = 1;
    }
    return handle;
}
#endif
static int handle_unhandled(uint64_t reason)
{
    fprintf(stderr, "kvm: unhandled exit %" PRIx64 "\n", reason);
    return -EINVAL;
}

static inline void set_gsi(kvm_context_t kvm, unsigned int gsi)
{
    uint32_t *bitmap = kvm->used_gsi_bitmap;

    if (gsi < kvm->max_gsi)
        bitmap[gsi / 32] |= 1U << (gsi % 32);
    else
        DPRINTF("Invalid GSI %u\n", gsi);
}

static inline void clear_gsi(kvm_context_t kvm, unsigned int gsi)
{
    uint32_t *bitmap = kvm->used_gsi_bitmap;

    if (gsi < kvm->max_gsi)
        bitmap[gsi / 32] &= ~(1U << (gsi % 32));
    else
        DPRINTF("Invalid GSI %u\n", gsi);
}
struct slot_info {
    unsigned long phys_addr;
    unsigned long len;
    unsigned long userspace_addr;
    unsigned flags;
    int logging_count;
};

struct slot_info slots[KVM_MAX_NUM_MEM_REGIONS];

static void init_slots(void)
{
    int i;

    for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS; ++i)
        slots[i].len = 0;
}

static int get_free_slot(kvm_context_t kvm)
{
    int i;
    int tss_ext;

#if defined(KVM_CAP_SET_TSS_ADDR) && !defined(__s390__)
    tss_ext = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
#else
    tss_ext = 0;
#endif

    /*
     * on older kernels where the set tss ioctl is not supported we must save
     * slot 0 to hold the extended memory, as the vmx will use the last 3
     * pages of this slot.
     */
    if (tss_ext > 0)
        i = 0;
    else
        i = 1;

    for (; i < KVM_MAX_NUM_MEM_REGIONS; ++i)
        if (!slots[i].len)
            return i;

    return -1;
}
static void register_slot(int slot, unsigned long phys_addr,
                          unsigned long len, unsigned long userspace_addr,
                          unsigned flags)
{
    slots[slot].phys_addr = phys_addr;
    slots[slot].len = len;
    slots[slot].userspace_addr = userspace_addr;
    slots[slot].flags = flags;
}

static void free_slot(int slot)
{
    slots[slot].len = 0;
    slots[slot].logging_count = 0;
}

static int get_slot(unsigned long phys_addr)
{
    int i;

    for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS; ++i) {
        if (slots[i].len && slots[i].phys_addr <= phys_addr &&
            (slots[i].phys_addr + slots[i].len - 1) >= phys_addr)
            return i;
    }
    return -1;
}

/* Returns -1 if this slot is not totally contained in any other,
 * and the number of the containing slot otherwise */
static int get_container_slot(uint64_t phys_addr, unsigned long size)
{
    int i;

    for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS; ++i)
        if (slots[i].len && slots[i].phys_addr <= phys_addr &&
            (slots[i].phys_addr + slots[i].len) >= phys_addr + size)
            return i;
    return -1;
}

int kvm_is_containing_region(kvm_context_t kvm, unsigned long phys_addr,
                             unsigned long size)
{
    int slot = get_container_slot(phys_addr, size);

    if (slot == -1)
        return 0;
    return 1;
}
/*
 * dirty pages logging control
 */
static int kvm_dirty_pages_log_change(kvm_context_t kvm,
                                      unsigned long phys_addr, unsigned flags,
                                      unsigned mask)
{
    int r = -1;
    int slot = get_slot(phys_addr);

    if (slot == -1) {
        fprintf(stderr, "BUG: %s: invalid parameters\n", __FUNCTION__);
        return 1;
    }

    flags = (slots[slot].flags & ~mask) | flags;
    if (flags == slots[slot].flags)
        return 0;
    slots[slot].flags = flags;

    {
        struct kvm_userspace_memory_region mem = {
            .slot = slot,
            .memory_size = slots[slot].len,
            .guest_phys_addr = slots[slot].phys_addr,
            .userspace_addr = slots[slot].userspace_addr,
            .flags = slots[slot].flags,
        };

        DPRINTF("slot %d start %llx len %llx flags %x\n",
                mem.slot, mem.guest_phys_addr, mem.memory_size, mem.flags);
        r = kvm_vm_ioctl(kvm_state, KVM_SET_USER_MEMORY_REGION, &mem);
        if (r < 0)
            fprintf(stderr, "%s: %m\n", __FUNCTION__);
    }
    return r;
}

static int kvm_dirty_pages_log_change_all(kvm_context_t kvm,
                                          int (*change)(kvm_context_t kvm,
                                                        uint64_t start,
                                                        uint64_t len))
{
    int i, r;

    for (i = r = 0; i < KVM_MAX_NUM_MEM_REGIONS && r == 0; i++) {
        if (slots[i].len)
            r = change(kvm, slots[i].phys_addr, slots[i].len);
    }
    return r;
}

int kvm_dirty_pages_log_enable_slot(kvm_context_t kvm, uint64_t phys_addr,
                                    uint64_t len)
{
    int slot = get_slot(phys_addr);

    DPRINTF("start %" PRIx64 " len %" PRIx64 "\n", phys_addr, len);
    if (slot == -1) {
        fprintf(stderr, "BUG: %s: invalid parameters\n", __func__);
        return -EINVAL;
    }

    if (slots[slot].logging_count++)
        return 0;

    return kvm_dirty_pages_log_change(kvm, slots[slot].phys_addr,
                                      KVM_MEM_LOG_DIRTY_PAGES,
                                      KVM_MEM_LOG_DIRTY_PAGES);
}

int kvm_dirty_pages_log_disable_slot(kvm_context_t kvm, uint64_t phys_addr,
                                     uint64_t len)
{
    int slot = get_slot(phys_addr);

    if (slot == -1) {
        fprintf(stderr, "BUG: %s: invalid parameters\n", __func__);
        return -EINVAL;
    }

    if (--slots[slot].logging_count)
        return 0;

    return kvm_dirty_pages_log_change(kvm, slots[slot].phys_addr, 0,
                                      KVM_MEM_LOG_DIRTY_PAGES);
}

/*
 * Enable dirty page logging for all memory regions
 */
int kvm_dirty_pages_log_enable_all(kvm_context_t kvm)
{
    if (kvm->dirty_pages_log_all)
        return 0;
    kvm->dirty_pages_log_all = 1;
    return kvm_dirty_pages_log_change_all(kvm, kvm_dirty_pages_log_enable_slot);
}

/*
 * Enable dirty page logging only for memory regions that were created with
 * dirty logging enabled (disable for all other memory regions).
 */
int kvm_dirty_pages_log_reset(kvm_context_t kvm)
{
    if (!kvm->dirty_pages_log_all)
        return 0;
    kvm->dirty_pages_log_all = 0;
    return kvm_dirty_pages_log_change_all(kvm,
                                          kvm_dirty_pages_log_disable_slot);
}
static int kvm_create_context(void);
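/*
 * Open /dev/kvm, check that kernel and userspace agree on the expected
 * KVM API version, and initialize the global kvm_state/kvm_context
 * before the VM itself is created in kvm_create_context().
 */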
int kvm_init(int smp_cpus)
{
    int fd;
    int r, gsi_count, i;

    fd = open("/dev/kvm", O_RDWR);
    if (fd == -1) {
        perror("open /dev/kvm");
        return -1;
    }
    r = ioctl(fd, KVM_GET_API_VERSION, 0);
    if (r == -1) {
        fprintf(stderr,
                "kvm kernel version too old: "
                "KVM_GET_API_VERSION ioctl not supported\n");
        goto out_close;
    }
    if (r < EXPECTED_KVM_API_VERSION) {
        fprintf(stderr, "kvm kernel version too old: "
                "We expect API version %d or newer, but got "
                "version %d\n", EXPECTED_KVM_API_VERSION, r);
        goto out_close;
    }
    if (r > EXPECTED_KVM_API_VERSION) {
        fprintf(stderr, "kvm userspace version too old\n");
        goto out_close;
    }
    kvm_abi = r;
    kvm_page_size = getpagesize();
    kvm_state = qemu_mallocz(sizeof(*kvm_state));
    kvm_context = &kvm_state->kvm_context;

    kvm_state->fd = fd;
    kvm_state->vmfd = -1;
    kvm_context->opaque = cpu_single_env;
    kvm_context->dirty_pages_log_all = 0;
    kvm_context->no_irqchip_creation = 0;
    kvm_context->no_pit_creation = 0;

#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_INIT(&kvm_state->kvm_sw_breakpoints);
#endif

    gsi_count = kvm_get_gsi_count(kvm_context);
    if (gsi_count > 0) {
        int gsi_bits;

        /* Round up so we can search ints using ffs */
        gsi_bits = ALIGN(gsi_count, 32);
        kvm_context->used_gsi_bitmap = qemu_mallocz(gsi_bits / 8);
        kvm_context->max_gsi = gsi_bits;

        /* Mark any over-allocated bits as already in use */
        for (i = gsi_count; i < gsi_bits; i++)
            set_gsi(kvm_context, i);
    }

    pthread_mutex_lock(&qemu_mutex);
    return kvm_create_context();

  out_close:
    close(fd);
    return -1;
}
static void kvm_finalize(KVMState *s)
{
    /* FIXME
    if (kvm->vcpu_fd[0] != -1)
        close(kvm->vcpu_fd[0]);
    if (kvm->vm_fd != -1)
        close(kvm->vm_fd);
    */
    close(s->fd);
    free(s);
}

void kvm_disable_irqchip_creation(kvm_context_t kvm)
{
    kvm->no_irqchip_creation = 1;
}

void kvm_disable_pit_creation(kvm_context_t kvm)
{
    kvm->no_pit_creation = 1;
}
static void kvm_create_vcpu(CPUState *env, int id)
{
    long mmap_size;
    int r;

    r = kvm_vm_ioctl(kvm_state, KVM_CREATE_VCPU, id);
    if (r < 0) {
        fprintf(stderr, "kvm_create_vcpu: %m\n");
        goto err;
    }

    env->kvm_fd = r;
    env->kvm_state = kvm_state;

    mmap_size = kvm_ioctl(kvm_state, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        fprintf(stderr, "get vcpu mmap size: %m\n");
        goto err_fd;
    }
    env->kvm_run =
        mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, env->kvm_fd,
             0);
    if (env->kvm_run == MAP_FAILED) {
        fprintf(stderr, "mmap vcpu area: %m\n");
        goto err_fd;
    }

    return;
  err_fd:
    close(env->kvm_fd);
  err:
    /* We're no good with semi-broken states. */
    abort();
}
static int kvm_set_boot_vcpu_id(kvm_context_t kvm, uint32_t id)
{
#ifdef KVM_CAP_SET_BOOT_CPU_ID
    int r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_SET_BOOT_CPU_ID);
    if (r > 0)
        return kvm_vm_ioctl(kvm_state, KVM_SET_BOOT_CPU_ID, id);
    return -ENOSYS;
#else
    return -ENOSYS;
#endif
}

int kvm_create_vm(kvm_context_t kvm)
{
    int fd;

#ifdef KVM_CAP_IRQ_ROUTING
    kvm->irq_routes = qemu_mallocz(sizeof(*kvm->irq_routes));
    kvm->nr_allocated_irq_routes = 0;
#endif

    fd = kvm_ioctl(kvm_state, KVM_CREATE_VM, 0);
    if (fd < 0) {
        fprintf(stderr, "kvm_create_vm: %m\n");
        return -1;
    }
    kvm_state->vmfd = fd;
    return 0;
}

static int kvm_create_default_phys_mem(kvm_context_t kvm,
                                       unsigned long phys_mem_bytes,
                                       void **vm_mem)
{
#ifdef KVM_CAP_USER_MEMORY
    int r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY);
    if (r > 0)
        return 0;
    fprintf(stderr,
            "Hypervisor too old: KVM_CAP_USER_MEMORY extension not supported\n");
#else
#error Hypervisor too old: KVM_CAP_USER_MEMORY extension not supported
#endif
    return -1;
}

void kvm_create_irqchip(kvm_context_t kvm)
{
    int r;

    kvm->irqchip_in_kernel = 0;
#ifdef KVM_CAP_IRQCHIP
    if (!kvm->no_irqchip_creation) {
        r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_IRQCHIP);
        if (r > 0) {            /* kernel irqchip supported */
            r = kvm_vm_ioctl(kvm_state, KVM_CREATE_IRQCHIP);
            if (r >= 0) {
                kvm->irqchip_inject_ioctl = KVM_IRQ_LINE;
#if defined(KVM_CAP_IRQ_INJECT_STATUS) && defined(KVM_IRQ_LINE_STATUS)
                r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION,
                              KVM_CAP_IRQ_INJECT_STATUS);
                if (r > 0)
                    kvm->irqchip_inject_ioctl = KVM_IRQ_LINE_STATUS;
#endif
                kvm->irqchip_in_kernel = 1;
            } else
                fprintf(stderr, "Create kernel PIC irqchip failed\n");
        }
    }
#endif
    kvm_state->irqchip_in_kernel = kvm->irqchip_in_kernel;
}

int kvm_create(kvm_context_t kvm, unsigned long phys_mem_bytes, void **vm_mem)
{
    int r;

    r = kvm_create_vm(kvm);
    if (r < 0)
        return r;
    r = kvm_arch_create(kvm, phys_mem_bytes, vm_mem);
    if (r < 0)
        return r;
    init_slots();
    r = kvm_create_default_phys_mem(kvm, phys_mem_bytes, vm_mem);
    if (r < 0)
        return r;
    kvm_create_irqchip(kvm);

    return 0;
}
int kvm_register_phys_mem(kvm_context_t kvm,
                          unsigned long phys_start, void *userspace_addr,
                          unsigned long len, int log)
{
    struct kvm_userspace_memory_region memory = {
        .memory_size = len,
        .guest_phys_addr = phys_start,
        .userspace_addr = (unsigned long) (uintptr_t) userspace_addr,
        .flags = log ? KVM_MEM_LOG_DIRTY_PAGES : 0,
    };
    int r;

    memory.slot = get_free_slot(kvm);
    DPRINTF("memory: gpa: %llx, size: %llx, uaddr: %llx, slot: %x, flags: %lx\n",
            memory.guest_phys_addr, memory.memory_size, memory.userspace_addr,
            memory.slot, memory.flags);
    r = kvm_vm_ioctl(kvm_state, KVM_SET_USER_MEMORY_REGION, &memory);
    if (r < 0) {
        fprintf(stderr, "create_userspace_phys_mem: %s\n", strerror(-r));
        return -1;
    }
    register_slot(memory.slot, memory.guest_phys_addr, memory.memory_size,
                  memory.userspace_addr, memory.flags);
    return 0;
}

/* destroy/free a whole slot.
 * phys_start, len and slot are the params passed to kvm_create_phys_mem()
 */
void kvm_destroy_phys_mem(kvm_context_t kvm, unsigned long phys_start,
                          unsigned long len)
{
    int slot;
    int r;
    struct kvm_userspace_memory_region memory = {
        .memory_size = 0,
        .guest_phys_addr = phys_start,
        .userspace_addr = 0,
        .flags = 0,
    };

    slot = get_slot(phys_start);

    if ((slot >= KVM_MAX_NUM_MEM_REGIONS) || (slot == -1)) {
        fprintf(stderr, "BUG: %s: invalid parameters (slot=%d)\n", __FUNCTION__,
                slot);
        return;
    }
    if (phys_start != slots[slot].phys_addr) {
        fprintf(stderr,
                "WARNING: %s: phys_start is 0x%lx expecting 0x%lx\n",
                __FUNCTION__, phys_start, slots[slot].phys_addr);
        phys_start = slots[slot].phys_addr;
    }

    memory.slot = slot;
    DPRINTF("slot %d start %llx len %llx flags %x\n",
            memory.slot, memory.guest_phys_addr, memory.memory_size,
            memory.flags);
    r = kvm_vm_ioctl(kvm_state, KVM_SET_USER_MEMORY_REGION, &memory);
    if (r < 0) {
        fprintf(stderr, "destroy_userspace_phys_mem: %s", strerror(-r));
        return;
    }

    free_slot(memory.slot);
}

void kvm_unregister_memory_area(kvm_context_t kvm, uint64_t phys_addr,
                                unsigned long size)
{
    int slot = get_container_slot(phys_addr, size);

    if (slot != -1) {
        DPRINTF("Unregistering memory region %llx (%lx)\n", phys_addr, size);
        kvm_destroy_phys_mem(kvm, phys_addr, size);
        return;
    }
}

static int kvm_get_map(kvm_context_t kvm, int ioctl_num, int slot, void *buf)
{
    int r;
    struct kvm_dirty_log log = {
        .slot = slot,
    };

    log.dirty_bitmap = buf;

    r = kvm_vm_ioctl(kvm_state, ioctl_num, &log);
    return r;
}

int kvm_get_dirty_pages(kvm_context_t kvm, unsigned long phys_addr, void *buf)
{
    int slot;

    slot = get_slot(phys_addr);
    return kvm_get_map(kvm, KVM_GET_DIRTY_LOG, slot, buf);
}
int kvm_get_dirty_pages_range(kvm_context_t kvm, unsigned long phys_addr,
                              unsigned long len, void *opaque,
                              int (*cb)(unsigned long start,
                                        unsigned long len, void *bitmap,
                                        void *opaque))
{
    int i;
    int r;
    unsigned long end_addr = phys_addr + len;
    void *buf;

    for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS; ++i) {
        if ((slots[i].len && (uint64_t) slots[i].phys_addr >= phys_addr)
            && ((uint64_t) slots[i].phys_addr + slots[i].len <= end_addr)) {
            buf = qemu_malloc(BITMAP_SIZE(slots[i].len));
            r = kvm_get_map(kvm, KVM_GET_DIRTY_LOG, i, buf);
            if (r) {
                qemu_free(buf);
                return r;
            }
            r = cb(slots[i].phys_addr, slots[i].len, buf, opaque);
            qemu_free(buf);
            if (r)
                return r;
        }
    }
    return 0;
}

#ifdef KVM_CAP_IRQCHIP

int kvm_set_irq_level(kvm_context_t kvm, int irq, int level, int *status)
{
    struct kvm_irq_level event;
    int r;

    if (!kvm->irqchip_in_kernel)
        return 0;
    event.level = level;
    event.irq = irq;
    r = kvm_vm_ioctl(kvm_state, kvm->irqchip_inject_ioctl, &event);
    if (r < 0)
        perror("kvm_set_irq_level");

    if (status) {
#ifdef KVM_CAP_IRQ_INJECT_STATUS
        *status =
            (kvm->irqchip_inject_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
#else
        *status = 1;
#endif
    }

    return 1;
}

int kvm_get_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip)
{
    int r;

    if (!kvm->irqchip_in_kernel)
        return 0;
    r = kvm_vm_ioctl(kvm_state, KVM_GET_IRQCHIP, chip);
    if (r < 0) {
        perror("kvm_get_irqchip\n");
    }
    return r;
}

int kvm_set_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip)
{
    int r;

    if (!kvm->irqchip_in_kernel)
        return 0;
    r = kvm_vm_ioctl(kvm_state, KVM_SET_IRQCHIP, chip);
    if (r < 0) {
        perror("kvm_set_irqchip\n");
    }
    return r;
}

#endif

static int handle_debug(CPUState *env)
{
#ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_run *run = env->kvm_run;

    return kvm_debug(env, &run->debug.arch);
#else
    return 0;
#endif
}
int kvm_get_regs(CPUState *env, struct kvm_regs *regs)
{
    return kvm_vcpu_ioctl(env, KVM_GET_REGS, regs);
}

int kvm_set_regs(CPUState *env, struct kvm_regs *regs)
{
    return kvm_vcpu_ioctl(env, KVM_SET_REGS, regs);
}

int kvm_get_fpu(CPUState *env, struct kvm_fpu *fpu)
{
    return kvm_vcpu_ioctl(env, KVM_GET_FPU, fpu);
}

int kvm_set_fpu(CPUState *env, struct kvm_fpu *fpu)
{
    return kvm_vcpu_ioctl(env, KVM_SET_FPU, fpu);
}

int kvm_get_sregs(CPUState *env, struct kvm_sregs *sregs)
{
    return kvm_vcpu_ioctl(env, KVM_GET_SREGS, sregs);
}

int kvm_set_sregs(CPUState *env, struct kvm_sregs *sregs)
{
    return kvm_vcpu_ioctl(env, KVM_SET_SREGS, sregs);
}
#ifdef KVM_CAP_MP_STATE
int kvm_get_mpstate(CPUState *env, struct kvm_mp_state *mp_state)
{
    int r;

    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_MP_STATE);
    if (r > 0)
        return kvm_vcpu_ioctl(env, KVM_GET_MP_STATE, mp_state);
    return -ENOSYS;
}

int kvm_set_mpstate(CPUState *env, struct kvm_mp_state *mp_state)
{
    int r;

    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_MP_STATE);
    if (r > 0)
        return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, mp_state);
    return -ENOSYS;
}
#endif

static int handle_mmio(CPUState *env)
{
    unsigned long addr = env->kvm_run->mmio.phys_addr;
    struct kvm_run *kvm_run = env->kvm_run;
    void *data = kvm_run->mmio.data;

    /* hack: Red Hat 7.1 generates these weird accesses. */
    if ((addr > 0xa0000 - 4 && addr <= 0xa0000) && kvm_run->mmio.len == 3)
        return 0;

    cpu_physical_memory_rw(addr, data, kvm_run->mmio.len,
                           kvm_run->mmio.is_write);
    return 0;
}

int handle_io_window(kvm_context_t kvm)
{
    return 1;
}

int handle_shutdown(kvm_context_t kvm, CPUState *env)
{
    /* stop the current vcpu from going back to guest mode */
    env->stopped = 1;

    qemu_system_reset_request();
    return 1;
}

static inline void push_nmi(kvm_context_t kvm)
{
#ifdef KVM_CAP_USER_NMI
    kvm_arch_push_nmi(kvm->opaque);
#endif /* KVM_CAP_USER_NMI */
}

void post_kvm_run(kvm_context_t kvm, CPUState *env)
{
    pthread_mutex_lock(&qemu_mutex);
    kvm_arch_post_run(env, env->kvm_run);
    cpu_single_env = env;
}

int pre_kvm_run(kvm_context_t kvm, CPUState *env)
{
    kvm_arch_pre_run(env, env->kvm_run);

    pthread_mutex_unlock(&qemu_mutex);
    return 0;
}

int kvm_is_ready_for_interrupt_injection(CPUState *env)
{
    return env->kvm_run->ready_for_interrupt_injection;
}
int kvm_run(CPUState *env)
{
    int r;
    kvm_context_t kvm = &env->kvm_state->kvm_context;
    struct kvm_run *run = env->kvm_run;
    int fd = env->kvm_fd;

  again:
    push_nmi(kvm);
#if !defined(__s390__)
    if (!kvm->irqchip_in_kernel)
        run->request_interrupt_window = kvm_arch_try_push_interrupts(env);
#endif

    if (env->kvm_cpu_state.regs_modified) {
        kvm_arch_put_registers(env);
        env->kvm_cpu_state.regs_modified = 0;
    }

    r = pre_kvm_run(kvm, env);
    if (r)
        return r;
    r = ioctl(fd, KVM_RUN, 0);

    if (r == -1 && errno != EINTR && errno != EAGAIN) {
        r = -errno;
        post_kvm_run(kvm, env);
        fprintf(stderr, "kvm_run: %s\n", strerror(-r));
        return r;
    }

    post_kvm_run(kvm, env);

#if defined(KVM_CAP_COALESCED_MMIO)
    if (kvm_state->coalesced_mmio) {
        struct kvm_coalesced_mmio_ring *ring =
            (void *) run + kvm_state->coalesced_mmio * PAGE_SIZE;
        while (ring->first != ring->last) {
            cpu_physical_memory_rw(ring->coalesced_mmio[ring->first].phys_addr,
                                   &ring->coalesced_mmio[ring->first].data[0],
                                   ring->coalesced_mmio[ring->first].len, 1);
            smp_wmb();
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }
#endif

#if !defined(__s390__)
    if (r == -1) {
        r = handle_io_window(kvm);
        goto more;
    }
#endif
    if (1) {
        switch (run->exit_reason) {
        case KVM_EXIT_UNKNOWN:
            r = handle_unhandled(run->hw.hardware_exit_reason);
            break;
        case KVM_EXIT_FAIL_ENTRY:
            r = handle_unhandled(run->fail_entry.hardware_entry_failure_reason);
            break;
        case KVM_EXIT_EXCEPTION:
            fprintf(stderr, "exception %d (%x)\n", run->ex.exception,
                    run->ex.error_code);
            kvm_show_regs(env);
            kvm_show_code(env);
            abort();
            break;
        case KVM_EXIT_IO:
            r = kvm_handle_io(run->io.port,
                              (uint8_t *)run + run->io.data_offset,
                              run->io.direction,
                              run->io.size,
                              run->io.count);
            r = 0;
            break;
        case KVM_EXIT_DEBUG:
            r = handle_debug(env);
            break;
        case KVM_EXIT_MMIO:
            r = handle_mmio(env);
            break;
        case KVM_EXIT_HLT:
            r = kvm_arch_halt(env);
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            break;
        case KVM_EXIT_SHUTDOWN:
            r = handle_shutdown(kvm, env);
            break;
#if defined(__s390__)
        case KVM_EXIT_S390_SIEIC:
            r = kvm_s390_handle_intercept(kvm, env, run);
            break;
        case KVM_EXIT_S390_RESET:
            r = kvm_s390_handle_reset(kvm, env, run);
            break;
#endif
        case KVM_EXIT_INTERNAL_ERROR:
            fprintf(stderr, "KVM internal error. Suberror: %d\n",
                    run->internal.suberror);
            kvm_show_regs(env);
            if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION)
                fprintf(stderr, "emulation failure, check dmesg for details\n");
            abort();
            break;
        default:
            if (kvm_arch_run(env)) {
                fprintf(stderr, "unhandled vm exit: 0x%x\n", run->exit_reason);
                kvm_show_regs(env);
                abort();
            }
            break;
        }
    }
  more:
    if (!r)
        goto again;
    return r;
}
int kvm_inject_irq(CPUState *env, unsigned irq)
{
    struct kvm_interrupt intr;

    intr.irq = irq;
    return kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
}

#ifdef KVM_CAP_SET_GUEST_DEBUG
int kvm_set_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
{
    return kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, dbg);
}
#endif

int kvm_set_signal_mask(CPUState *env, const sigset_t *sigset)
{
    struct kvm_signal_mask *sigmask;
    int r;

    if (!sigset)
        return kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, NULL);

    sigmask = qemu_malloc(sizeof(*sigmask) + sizeof(*sigset));

    sigmask->len = 8;
    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
    r = kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, sigmask);
    qemu_free(sigmask);
    return r;
}

int kvm_pit_in_kernel(kvm_context_t kvm)
{
    return kvm->pit_in_kernel;
}

int kvm_inject_nmi(CPUState *env)
{
#ifdef KVM_CAP_USER_NMI
    return kvm_vcpu_ioctl(env, KVM_NMI);
#else
    return -ENOSYS;
#endif
}

int kvm_init_coalesced_mmio(kvm_context_t kvm)
{
    int r = 0;

    kvm_state->coalesced_mmio = 0;
#ifdef KVM_CAP_COALESCED_MMIO
    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_MMIO);
    if (r > 0) {
        kvm_state->coalesced_mmio = r;
        return 0;
    }
#endif
    return r;
}
#ifdef KVM_CAP_DEVICE_ASSIGNMENT
int kvm_assign_pci_device(kvm_context_t kvm,
                          struct kvm_assigned_pci_dev *assigned_dev)
{
    return kvm_vm_ioctl(kvm_state, KVM_ASSIGN_PCI_DEVICE, assigned_dev);
}

static int kvm_old_assign_irq(kvm_context_t kvm,
                              struct kvm_assigned_irq *assigned_irq)
{
    return kvm_vm_ioctl(kvm_state, KVM_ASSIGN_IRQ, assigned_irq);
}

#ifdef KVM_CAP_ASSIGN_DEV_IRQ
int kvm_assign_irq(kvm_context_t kvm, struct kvm_assigned_irq *assigned_irq)
{
    int ret;

    ret = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_ASSIGN_DEV_IRQ);
    if (ret > 0)
        return kvm_vm_ioctl(kvm_state, KVM_ASSIGN_DEV_IRQ, assigned_irq);

    return kvm_old_assign_irq(kvm, assigned_irq);
}

int kvm_deassign_irq(kvm_context_t kvm, struct kvm_assigned_irq *assigned_irq)
{
    return kvm_vm_ioctl(kvm_state, KVM_DEASSIGN_DEV_IRQ, assigned_irq);
}
#else
int kvm_assign_irq(kvm_context_t kvm, struct kvm_assigned_irq *assigned_irq)
{
    return kvm_old_assign_irq(kvm, assigned_irq);
}
#endif
#endif

#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
int kvm_deassign_pci_device(kvm_context_t kvm,
                            struct kvm_assigned_pci_dev *assigned_dev)
{
    return kvm_vm_ioctl(kvm_state, KVM_DEASSIGN_PCI_DEVICE, assigned_dev);
}
#endif

int kvm_destroy_memory_region_works(kvm_context_t kvm)
{
    int ret = 0;

#ifdef KVM_CAP_DESTROY_MEMORY_REGION_WORKS
    ret =
        kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION,
                  KVM_CAP_DESTROY_MEMORY_REGION_WORKS);
    if (ret <= 0)
        ret = 0;
#endif
    return ret;
}

int kvm_reinject_control(kvm_context_t kvm, int pit_reinject)
{
#ifdef KVM_CAP_REINJECT_CONTROL
    int r;
    struct kvm_reinject_control control;

    control.pit_reinject = pit_reinject;

    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_REINJECT_CONTROL);
    if (r > 0)
        return kvm_vm_ioctl(kvm_state, KVM_REINJECT_CONTROL, &control);
#endif
    return -ENOSYS;
}

int kvm_has_gsi_routing(kvm_context_t kvm)
{
    int r = 0;

#ifdef KVM_CAP_IRQ_ROUTING
    r = kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
#endif
    return r;
}

int kvm_get_gsi_count(kvm_context_t kvm)
{
#ifdef KVM_CAP_IRQ_ROUTING
    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
#else
    return -EINVAL;
#endif
}

int kvm_clear_gsi_routes(kvm_context_t kvm)
{
#ifdef KVM_CAP_IRQ_ROUTING
    kvm->irq_routes->nr = 0;
    return 0;
#else
    return -EINVAL;
#endif
}
int kvm_add_routing_entry(kvm_context_t kvm,
                          struct kvm_irq_routing_entry *entry)
{
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing *z;
    struct kvm_irq_routing_entry *new;
    int n, size;

    if (kvm->irq_routes->nr == kvm->nr_allocated_irq_routes) {
        n = kvm->nr_allocated_irq_routes * 2;
        if (n < 64)
            n = 64;
        size = sizeof(struct kvm_irq_routing);
        size += n * sizeof(*new);
        z = realloc(kvm->irq_routes, size);
        if (!z)
            return -ENOMEM;
        kvm->nr_allocated_irq_routes = n;
        kvm->irq_routes = z;
    }
    n = kvm->irq_routes->nr++;
    new = &kvm->irq_routes->entries[n];
    memset(new, 0, sizeof(*new));
    new->gsi = entry->gsi;
    new->type = entry->type;
    new->flags = entry->flags;
    new->u = entry->u;

    set_gsi(kvm, entry->gsi);

    return 0;
#else
    return -ENOSYS;
#endif
}

int kvm_add_irq_route(kvm_context_t kvm, int gsi, int irqchip, int pin)
{
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing_entry e;

    e.gsi = gsi;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    return kvm_add_routing_entry(kvm, &e);
#else
    return -ENOSYS;
#endif
}
int kvm_del_routing_entry(kvm_context_t kvm,
                          struct kvm_irq_routing_entry *entry)
{
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing_entry *e, *p;
    int i, gsi, found = 0;

    gsi = entry->gsi;

    for (i = 0; i < kvm->irq_routes->nr; ++i) {
        e = &kvm->irq_routes->entries[i];
        if (e->type == entry->type && e->gsi == gsi) {
            switch (e->type) {
            case KVM_IRQ_ROUTING_IRQCHIP:{
                    if (e->u.irqchip.irqchip ==
                        entry->u.irqchip.irqchip
                        && e->u.irqchip.pin == entry->u.irqchip.pin) {
                        p = &kvm->irq_routes->entries[--kvm->irq_routes->nr];
                        *e = *p;
                        found = 1;
                    }
                    break;
                }
            case KVM_IRQ_ROUTING_MSI:{
                    if (e->u.msi.address_lo ==
                        entry->u.msi.address_lo
                        && e->u.msi.address_hi ==
                        entry->u.msi.address_hi
                        && e->u.msi.data == entry->u.msi.data) {
                        p = &kvm->irq_routes->entries[--kvm->irq_routes->nr];
                        *e = *p;
                        found = 1;
                    }
                    break;
                }
            default:
                break;
            }
            if (found) {
                /* If there are no other users of this GSI
                 * mark it available in the bitmap */
                for (i = 0; i < kvm->irq_routes->nr; i++) {
                    e = &kvm->irq_routes->entries[i];
                    if (e->gsi == gsi)
                        break;
                }
                if (i == kvm->irq_routes->nr)
                    clear_gsi(kvm, gsi);

                return 0;
            }
        }
    }
    return -ESRCH;
#else
    return -ENOSYS;
#endif
}
int kvm_update_routing_entry(kvm_context_t kvm,
                             struct kvm_irq_routing_entry *entry,
                             struct kvm_irq_routing_entry *newentry)
{
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing_entry *e;
    int i;

    if (entry->gsi != newentry->gsi || entry->type != newentry->type) {
        return -EINVAL;
    }

    for (i = 0; i < kvm->irq_routes->nr; ++i) {
        e = &kvm->irq_routes->entries[i];
        if (e->type != entry->type || e->gsi != entry->gsi) {
            continue;
        }
        switch (e->type) {
        case KVM_IRQ_ROUTING_IRQCHIP:
            if (e->u.irqchip.irqchip == entry->u.irqchip.irqchip &&
                e->u.irqchip.pin == entry->u.irqchip.pin) {
                memcpy(&e->u.irqchip, &newentry->u.irqchip,
                       sizeof e->u.irqchip);
                return 0;
            }
            break;
        case KVM_IRQ_ROUTING_MSI:
            if (e->u.msi.address_lo == entry->u.msi.address_lo &&
                e->u.msi.address_hi == entry->u.msi.address_hi &&
                e->u.msi.data == entry->u.msi.data) {
                memcpy(&e->u.msi, &newentry->u.msi, sizeof e->u.msi);
                return 0;
            }
            break;
        default:
            break;
        }
    }
    return -ESRCH;
#else
    return -ENOSYS;
#endif
}

int kvm_del_irq_route(kvm_context_t kvm, int gsi, int irqchip, int pin)
{
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing_entry e;

    e.gsi = gsi;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    return kvm_del_routing_entry(kvm, &e);
#else
    return -ENOSYS;
#endif
}
)
1330 #ifdef KVM_CAP_IRQ_ROUTING
1331 kvm
->irq_routes
->flags
= 0;
1332 return kvm_vm_ioctl(kvm_state
, KVM_SET_GSI_ROUTING
, kvm
->irq_routes
);
1338 int kvm_get_irq_route_gsi(kvm_context_t kvm
)
1341 uint32_t *buf
= kvm
->used_gsi_bitmap
;
1343 /* Return the lowest unused GSI in the bitmap */
1344 for (i
= 0; i
< kvm
->max_gsi
/ 32; i
++) {
1349 return bit
- 1 + i
* 32;
1355 #ifdef KVM_CAP_DEVICE_MSIX
1356 int kvm_assign_set_msix_nr(kvm_context_t kvm
,
1357 struct kvm_assigned_msix_nr
*msix_nr
)
1359 return kvm_vm_ioctl(kvm_state
, KVM_ASSIGN_SET_MSIX_NR
, msix_nr
);
1362 int kvm_assign_set_msix_entry(kvm_context_t kvm
,
1363 struct kvm_assigned_msix_entry
*entry
)
1365 return kvm_vm_ioctl(kvm_state
, KVM_ASSIGN_SET_MSIX_ENTRY
, entry
);
#if defined(KVM_CAP_IRQFD) && defined(CONFIG_EVENTFD)

#include <sys/eventfd.h>

static int _kvm_irqfd(kvm_context_t kvm, int fd, int gsi, int flags)
{
    struct kvm_irqfd data = {
        .fd = fd,
        .gsi = gsi,
        .flags = flags,
    };

    return kvm_vm_ioctl(kvm_state, KVM_IRQFD, &data);
}

int kvm_irqfd(kvm_context_t kvm, int gsi, int flags)
{
    int r;
    int fd;

    if (!kvm_check_extension(kvm_state, KVM_CAP_IRQFD))
        return -ENOENT;

    fd = eventfd(0, 0);
    if (fd < 0)
        return -errno;

    r = _kvm_irqfd(kvm, fd, gsi, 0);
    if (r < 0) {
        close(fd);
        return -errno;
    }

    return fd;
}

#else /* KVM_CAP_IRQFD */

int kvm_irqfd(kvm_context_t kvm, int gsi, int flags)
{
    return -ENOSYS;
}

#endif /* KVM_CAP_IRQFD */
static inline unsigned long kvm_get_thread_id(void)
{
    return syscall(SYS_gettid);
}
static void qemu_cond_wait(pthread_cond_t *cond)
{
    CPUState *env = cpu_single_env;

    pthread_cond_wait(cond, &qemu_mutex);
    cpu_single_env = env;
}

static void sig_ipi_handler(int n)
{
}

static void hardware_memory_error(void)
{
    fprintf(stderr, "Hardware memory error!\n");
    exit(1);
}
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}
static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
#if defined(KVM_CAP_MCE) && defined(TARGET_I386)
    if (first_cpu->mcg_cap && siginfo->ssi_addr
        && siginfo->ssi_code == BUS_MCEERR_AO) {
        uint64_t status;
        unsigned long paddr;
        CPUState *cenv;

        /* Hope we are lucky for AO MCE */
        if (do_qemu_ram_addr_from_host((void *)(intptr_t)siginfo->ssi_addr,
                                       &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!: %llx\n",
                    (unsigned long long)siginfo->ssi_addr);
            return;
        }
        status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
            | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
            | 0xc0;
        kvm_inject_x86_mce(first_cpu, 9, status,
                           MCG_STATUS_MCIP | MCG_STATUS_RIPV, paddr,
                           (MCM_ADDR_PHYS << 6) | 0xc, 1);
        for (cenv = first_cpu->next_cpu; cenv != NULL; cenv = cenv->next_cpu)
            kvm_inject_x86_mce(cenv, 1, MCI_STATUS_VAL | MCI_STATUS_UC,
                               MCG_STATUS_MCIP | MCG_STATUS_RIPV, 0, 0, 1);
    } else
#endif
    {
        if (siginfo->ssi_code == BUS_MCEERR_AO)
            return;
        else if (siginfo->ssi_code == BUS_MCEERR_AR)
            hardware_memory_error();
        else
            sigbus_reraise();
    }
}
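/*
 * Run func(data) on the given vcpu's thread: call it directly when we
 * are already on that thread, otherwise queue a work item, kick the
 * target with SIG_IPI and wait for completion on qemu_work_cond.
 */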
static void on_vcpu(CPUState *env, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (env == current_env) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    if (!env->kvm_cpu_state.queued_work_first)
        env->kvm_cpu_state.queued_work_first = &wi;
    else
        env->kvm_cpu_state.queued_work_last->next = &wi;
    env->kvm_cpu_state.queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    pthread_kill(env->kvm_cpu_state.thread, SIG_IPI);
    while (!wi.done)
        qemu_cond_wait(&qemu_work_cond);
}
void kvm_arch_get_registers(CPUState *env)
{
    kvm_arch_save_regs(env);
}

static void do_kvm_cpu_synchronize_state(void *_env)
{
    CPUState *env = _env;

    if (!env->kvm_cpu_state.regs_modified) {
        kvm_arch_get_registers(env);
        env->kvm_cpu_state.regs_modified = 1;
    }
}

void kvm_cpu_synchronize_state(CPUState *env)
{
    if (!env->kvm_cpu_state.regs_modified)
        on_vcpu(env, do_kvm_cpu_synchronize_state, env);
}
static void inject_interrupt(void *data)
{
    cpu_interrupt(current_env, (long) data);
}

void kvm_inject_interrupt(CPUState *env, int mask)
{
    on_vcpu(env, inject_interrupt, (void *) (long) mask);
}

void kvm_update_interrupt_request(CPUState *env)
{
    int signal = 0;

    if (env) {
        if (!current_env || !current_env->created)
            signal = 1;
        /*
         * Testing for created here is really redundant
         */
        if (current_env && current_env->created &&
            env != current_env && !env->kvm_cpu_state.signalled)
            signal = 1;

        if (signal) {
            env->kvm_cpu_state.signalled = 1;
            if (env->kvm_cpu_state.thread)
                pthread_kill(env->kvm_cpu_state.thread, SIG_IPI);
        }
    }
}
static void kvm_do_load_registers(void *_env)
{
    CPUState *env = _env;

    kvm_arch_load_regs(env);
}

void kvm_load_registers(CPUState *env)
{
    if (kvm_enabled() && qemu_system_ready)
        on_vcpu(env, kvm_do_load_registers, env);
}

static void kvm_do_save_registers(void *_env)
{
    CPUState *env = _env;

    kvm_arch_save_regs(env);
}

void kvm_save_registers(CPUState *env)
{
    if (kvm_enabled())
        on_vcpu(env, kvm_do_save_registers, env);
}

static void kvm_do_load_mpstate(void *_env)
{
    CPUState *env = _env;

    kvm_arch_load_mpstate(env);
}

void kvm_load_mpstate(CPUState *env)
{
    if (kvm_enabled() && qemu_system_ready)
        on_vcpu(env, kvm_do_load_mpstate, env);
}

static void kvm_do_save_mpstate(void *_env)
{
    CPUState *env = _env;

    kvm_arch_save_mpstate(env);
#ifdef KVM_CAP_MP_STATE
    if (kvm_irqchip_in_kernel())
        env->halted = (env->mp_state == KVM_MP_STATE_HALTED);
#endif
}

void kvm_save_mpstate(CPUState *env)
{
    if (kvm_enabled())
        on_vcpu(env, kvm_do_save_mpstate, env);
}

int kvm_cpu_exec(CPUState *env)
{
    int r;

    r = kvm_run(env);
    if (r < 0) {
        printf("kvm_run returned %d\n", r);
        vm_stop(0);
    }

    return 0;
}
static int is_cpu_stopped(CPUState *env)
{
    return !vm_running || env->stopped;
}

static void flush_queued_work(CPUState *env)
{
    struct qemu_work_item *wi;

    if (!env->kvm_cpu_state.queued_work_first)
        return;

    while ((wi = env->kvm_cpu_state.queued_work_first)) {
        env->kvm_cpu_state.queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
    }
    env->kvm_cpu_state.queued_work_last = NULL;
    pthread_cond_broadcast(&qemu_work_cond);
}
static void kvm_on_sigbus(CPUState *env, siginfo_t *siginfo)
{
#if defined(KVM_CAP_MCE) && defined(TARGET_I386)
    struct kvm_x86_mce mce = {
        .bank = 9,
    };
    unsigned long paddr;
    int r;

    if (env->mcg_cap && siginfo->si_addr
        && (siginfo->si_code == BUS_MCEERR_AR
            || siginfo->si_code == BUS_MCEERR_AO)) {
        if (siginfo->si_code == BUS_MCEERR_AR) {
            /* Fake an Intel architectural Data Load SRAR UCR */
            mce.status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
                | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
                | MCI_STATUS_AR | 0x134;
            mce.misc = (MCM_ADDR_PHYS << 6) | 0xc;
            mce.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_EIPV;
        } else {
            /* Fake an Intel architectural Memory scrubbing UCR */
            mce.status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
                | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
                | 0xc0;
            mce.misc = (MCM_ADDR_PHYS << 6) | 0xc;
            mce.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        }
        if (do_qemu_ram_addr_from_host((void *)siginfo->si_addr, &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!\n");
            /* Hope we are lucky for AO MCE */
            if (siginfo->si_code == BUS_MCEERR_AO)
                return;
            else
                hardware_memory_error();
        }
        mce.addr = paddr;
        r = kvm_set_mce(env, &mce);
        if (r < 0) {
            fprintf(stderr, "kvm_set_mce: %s\n", strerror(errno));
            abort();
        }
    } else
#endif
    {
        if (siginfo->si_code == BUS_MCEERR_AO)
            return;
        else if (siginfo->si_code == BUS_MCEERR_AR)
            hardware_memory_error();
        else
            sigbus_reraise();
    }
}
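/*
 * Wait (with qemu_mutex dropped) for SIG_IPI or SIGBUS, then run any
 * work queued for this vcpu and handle stop requests.
 */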
static void kvm_main_loop_wait(CPUState *env, int timeout)
{
    struct timespec ts;
    int r, e;
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;

    ts.tv_sec = timeout / 1000;
    ts.tv_nsec = (timeout % 1000) * 1000000;
    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        pthread_mutex_unlock(&qemu_mutex);

        r = sigtimedwait(&waitset, &siginfo, &ts);
        e = errno;

        pthread_mutex_lock(&qemu_mutex);

        if (r == -1 && !(e == EAGAIN || e == EINTR)) {
            printf("sigtimedwait: %s\n", strerror(e));
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            kvm_on_sigbus(env, &siginfo);
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            printf("sigpending: %s\n", strerror(e));
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));

    cpu_single_env = env;
    flush_queued_work(env);

    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;
        pthread_cond_signal(&qemu_pause_cond);
    }

    env->kvm_cpu_state.signalled = 0;
}
static int all_threads_paused(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        if (penv->stop)
            return 0;
        penv = (CPUState *) penv->next_cpu;
    }

    return 1;
}

static void pause_all_threads(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        if (penv != cpu_single_env) {
            penv->stop = 1;
            pthread_kill(penv->kvm_cpu_state.thread, SIG_IPI);
        } else {
            penv->stop = 0;
            penv->stopped = 1;
            cpu_exit(penv);
        }
        penv = (CPUState *) penv->next_cpu;
    }

    while (!all_threads_paused())
        qemu_cond_wait(&qemu_pause_cond);
}

static void resume_all_threads(void)
{
    CPUState *penv = first_cpu;

    assert(!cpu_single_env);

    while (penv) {
        penv->stop = 0;
        penv->stopped = 0;
        pthread_kill(penv->kvm_cpu_state.thread, SIG_IPI);
        penv = (CPUState *) penv->next_cpu;
    }
}

static void kvm_vm_state_change_handler(void *context, int running, int reason)
{
    if (running)
        resume_all_threads();
    else
        pause_all_threads();
}
static void setup_kernel_sigmask(CPUState *env)
{
    sigset_t set;

    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigprocmask(SIG_BLOCK, &set, NULL);

    sigprocmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);

    kvm_set_signal_mask(env, &set);
}

static void qemu_kvm_system_reset(void)
{
    CPUState *penv = first_cpu;

    pause_all_threads();

    qemu_system_reset();

    while (penv) {
        kvm_arch_cpu_reset(penv);
        penv = (CPUState *) penv->next_cpu;
    }

    resume_all_threads();
}

static void process_irqchip_events(CPUState *env)
{
    kvm_arch_process_irqchip_events(env);
    if (kvm_arch_has_work(env))
        env->halted = 0;
}

static int kvm_main_loop_cpu(CPUState *env)
{
    while (1) {
        int run_cpu = !is_cpu_stopped(env);
        if (run_cpu && !kvm_irqchip_in_kernel()) {
            process_irqchip_events(env);
            run_cpu = !env->halted;
        }
        if (run_cpu) {
            kvm_main_loop_wait(env, 0);
            kvm_cpu_exec(env);
        } else {
            kvm_main_loop_wait(env, 1000);
        }
    }
    pthread_mutex_unlock(&qemu_mutex);
    return 0;
}
static void *ap_main_loop(void *_env)
{
    CPUState *env = _env;
    sigset_t signals;
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
    struct ioperm_data *data = NULL;
#endif

    current_env = env;
    env->thread_id = kvm_get_thread_id();
    sigfillset(&signals);
    sigprocmask(SIG_BLOCK, &signals, NULL);
    kvm_create_vcpu(env, env->cpu_index);

#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
    /* do ioperm for io ports of assigned devices */
    QLIST_FOREACH(data, &ioperm_head, entries)
        on_vcpu(env, kvm_arch_do_ioperm, data);
#endif

    setup_kernel_sigmask(env);

    pthread_mutex_lock(&qemu_mutex);
    cpu_single_env = env;

    kvm_arch_init_vcpu(env);

    kvm_arch_load_regs(env);

    /* signal VCPU creation */
    current_env->created = 1;
    pthread_cond_signal(&qemu_vcpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready)
        qemu_cond_wait(&qemu_system_cond);

    /* re-initialize cpu_single_env after re-acquiring qemu_mutex */
    cpu_single_env = env;

    kvm_main_loop_cpu(env);
    return NULL;
}

void kvm_init_vcpu(CPUState *env)
{
    pthread_create(&env->kvm_cpu_state.thread, NULL, ap_main_loop, env);

    while (env->created == 0)
        qemu_cond_wait(&qemu_vcpu_cond);
}

int kvm_vcpu_inited(CPUState *env)
{
    return env->created;
}
void kvm_hpet_disable_kpit(void)
{
    struct kvm_pit_state2 ps2;

    kvm_get_pit2(kvm_context, &ps2);
    ps2.flags |= KVM_PIT_FLAGS_HPET_LEGACY;
    kvm_set_pit2(kvm_context, &ps2);
}

void kvm_hpet_enable_kpit(void)
{
    struct kvm_pit_state2 ps2;

    kvm_get_pit2(kvm_context, &ps2);
    ps2.flags &= ~KVM_PIT_FLAGS_HPET_LEGACY;
    kvm_set_pit2(kvm_context, &ps2);
}
int kvm_init_ap(void)
{
    struct sigaction action;

    qemu_add_vm_change_state_handler(kvm_vm_state_change_handler, NULL);

    signal(SIG_IPI, sig_ipi_handler);

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);
    prctl(PR_MCE_KILL, 1, 1);
    return 0;
}
void qemu_kvm_notify_work(void)
{
    uint64_t value = 1;
    char buffer[8];
    size_t offset = 0;

    if (io_thread_fd == -1)
        return;

    memcpy(buffer, &value, sizeof(value));

    while (offset < 8) {
        ssize_t len;

        len = write(io_thread_fd, buffer + offset, 8 - offset);
        if (len == -1 && errno == EINTR)
            continue;

        /* In case we have a pipe, there is no reason to insist on writing
         * all 8 bytes
         */
        if (len == -1 && errno == EAGAIN)
            break;

        if (len <= 0)
            break;

        offset += len;
    }
}
/* If we have signalfd, we mask out the signals we want to handle and then
 * use signalfd to listen for them.  We rely on whatever the current signal
 * handler is to dispatch the signals when we receive them.
 */
static void sigfd_handler(void *opaque)
{
    int fd = (unsigned long) opaque;
    struct qemu_signalfd_siginfo info;
    struct sigaction action;
    ssize_t len;

    while (1) {
        do {
            len = read(fd, &info, sizeof(info));
        } while (len == -1 && errno == EINTR);

        if (len == -1 && errno == EAGAIN)
            break;

        if (len != sizeof(info)) {
            printf("read from sigfd returned %zd: %m\n", len);
            return;
        }

        sigaction(info.ssi_signo, NULL, &action);
        if ((action.sa_flags & SA_SIGINFO) && action.sa_sigaction)
            action.sa_sigaction(info.ssi_signo,
                                (siginfo_t *)&info, NULL);
        else if (action.sa_handler)
            action.sa_handler(info.ssi_signo);
    }
}

/* Used to break IO thread out of select */
static void io_thread_wakeup(void *opaque)
{
    int fd = (unsigned long) opaque;
    char buffer[4096];

    /* Drain the pipe/(eventfd) */
    while (1) {
        ssize_t len;

        len = read(fd, buffer, sizeof(buffer));
        if (len == -1 && errno == EINTR)
            continue;

        if (len <= 0)
            break;
    }
}
int kvm_main_loop(void)
{
    int fds[2];
    sigset_t mask;
    int sigfd;

    io_thread = pthread_self();
    qemu_system_ready = 1;

    if (qemu_eventfd(fds) == -1) {
        fprintf(stderr, "failed to create eventfd\n");
        return -errno;
    }

    fcntl(fds[0], F_SETFL, O_NONBLOCK);
    fcntl(fds[1], F_SETFL, O_NONBLOCK);

    qemu_set_fd_handler2(fds[0], NULL, io_thread_wakeup, NULL,
                         (void *)(unsigned long) fds[0]);

    io_thread_fd = fds[1];

    sigemptyset(&mask);
    sigaddset(&mask, SIGIO);
    sigaddset(&mask, SIGALRM);
    sigaddset(&mask, SIGBUS);
    sigprocmask(SIG_BLOCK, &mask, NULL);

    sigfd = qemu_signalfd(&mask);
    if (sigfd == -1) {
        fprintf(stderr, "failed to create signalfd\n");
        return -errno;
    }

    fcntl(sigfd, F_SETFL, O_NONBLOCK);

    qemu_set_fd_handler2(sigfd, NULL, sigfd_handler, NULL,
                         (void *)(unsigned long) sigfd);

    pthread_cond_broadcast(&qemu_system_cond);

    io_thread_sigfd = sigfd;
    cpu_single_env = NULL;

    while (1) {
        main_loop_wait(1000);
        if (qemu_shutdown_requested()) {
            if (qemu_no_shutdown()) {
                vm_stop(0);
            } else
                break;
        } else if (qemu_powerdown_requested())
            qemu_irq_raise(qemu_system_powerdown);
        else if (qemu_reset_requested())
            qemu_kvm_system_reset();
        else if (kvm_debug_cpu_requested) {
            gdb_set_stop_cpu(kvm_debug_cpu_requested);
            vm_stop(EXCP_DEBUG);
            kvm_debug_cpu_requested = NULL;
        }
    }

    pause_all_threads();
    pthread_mutex_unlock(&qemu_mutex);

    return 0;
}
static int destroy_region_works = 0;

#if !defined(TARGET_I386)
int kvm_arch_init_irq_routing(void)
{
    return 0;
}
#endif
static int kvm_create_context(void)
{
    int r;

    if (!kvm_irqchip) {
        kvm_disable_irqchip_creation(kvm_context);
    }
    if (!kvm_pit) {
        kvm_disable_pit_creation(kvm_context);
    }
    if (kvm_create(kvm_context, 0, NULL) < 0) {
        kvm_finalize(kvm_state);
        return -1;
    }
    r = kvm_arch_qemu_create_context();
    if (r < 0)
        kvm_finalize(kvm_state);
    if (kvm_pit && !kvm_pit_reinject) {
        if (kvm_reinject_control(kvm_context, 0)) {
            fprintf(stderr, "failure to disable in-kernel PIT reinjection\n");
            return -1;
        }
    }

    destroy_region_works = kvm_destroy_memory_region_works(kvm_context);

    r = kvm_arch_init_irq_routing();
    if (r < 0) {
        return r;
    }

    kvm_init_ap();
    if (kvm_irqchip) {
        if (!qemu_kvm_has_gsi_routing()) {
            irq0override = 0;
            /* if kernel can't do irq routing, interrupt source
             * override 0->2 can not be set up as required by hpet,
             * so disable hpet here
             */
            no_hpet = 1;
        } else if (!qemu_kvm_has_pit_state2()) {
            no_hpet = 1;
        }
    }

    return 0;
}
static int must_use_aliases_source(target_phys_addr_t addr)
{
    if (destroy_region_works)
        return 0;
    if (addr == 0xa0000 || addr == 0xa8000)
        return 1;
    return 0;
}

static int must_use_aliases_target(target_phys_addr_t addr)
{
    if (destroy_region_works)
        return 0;
    if (addr >= 0xe0000000 && addr < 0x100000000ull)
        return 1;
    return 0;
}
static struct mapping {
    target_phys_addr_t phys;
    ram_addr_t ram;
    ram_addr_t len;
} mappings[KVM_MAX_NUM_MEM_REGIONS];
static int nr_mappings;

static struct mapping *find_ram_mapping(ram_addr_t ram_addr)
{
    struct mapping *p;

    for (p = mappings; p < mappings + nr_mappings; ++p) {
        if (p->ram <= ram_addr && ram_addr < p->ram + p->len) {
            return p;
        }
    }
    return NULL;
}

static struct mapping *find_mapping(target_phys_addr_t start_addr)
{
    struct mapping *p;

    for (p = mappings; p < mappings + nr_mappings; ++p) {
        if (p->phys <= start_addr && start_addr < p->phys + p->len) {
            return p;
        }
    }
    return NULL;
}

static void drop_mapping(target_phys_addr_t start_addr)
{
    struct mapping *p = find_mapping(start_addr);

    if (p)
        *p = mappings[--nr_mappings];
}
void kvm_set_phys_mem(target_phys_addr_t start_addr, ram_addr_t size,
                      ram_addr_t phys_offset)
{
    int r = 0;
    unsigned long area_flags;
    struct mapping *p;

    if (start_addr + size > phys_ram_size) {
        phys_ram_size = start_addr + size;
    }

    phys_offset &= ~IO_MEM_ROM;
    area_flags = phys_offset & ~TARGET_PAGE_MASK;

    if (area_flags != IO_MEM_RAM) {
        if (must_use_aliases_source(start_addr)) {
            kvm_destroy_memory_alias(kvm_context, start_addr);
            return;
        }
        if (must_use_aliases_target(start_addr))
            return;
        while (size > 0) {
            p = find_mapping(start_addr);
            if (p) {
                kvm_unregister_memory_area(kvm_context, p->phys, p->len);
                drop_mapping(p->phys);
            }
            start_addr += TARGET_PAGE_SIZE;
            if (size > TARGET_PAGE_SIZE) {
                size -= TARGET_PAGE_SIZE;
            } else {
                size = 0;
            }
        }
        return;
    }

    r = kvm_is_containing_region(kvm_context, start_addr, size);
    if (r)
        return;

    if (area_flags >= TLB_MMIO)
        return;

    if (must_use_aliases_source(start_addr)) {
        p = find_ram_mapping(phys_offset);
        if (p) {
            kvm_create_memory_alias(kvm_context, start_addr, size,
                                    p->phys + (phys_offset - p->ram));
        }
        return;
    }

    r = kvm_register_phys_mem(kvm_context, start_addr,
                              qemu_get_ram_ptr(phys_offset), size, 0);
    if (r < 0) {
        printf("kvm_cpu_register_physical_memory: failed\n");
        exit(1);
    }

    drop_mapping(start_addr);
    p = &mappings[nr_mappings++];
    p->phys = start_addr;
    p->ram = phys_offset;
    p->len = size;

    return;
}
int kvm_setup_guest_memory(void *area, unsigned long size)
{
    int ret = 0;

#ifdef MADV_DONTFORK
    if (kvm_enabled() && !kvm_has_sync_mmu())
        ret = madvise(area, size, MADV_DONTFORK);
#endif

    if (ret)
        perror("madvise");

    return ret;
}
#ifdef KVM_CAP_SET_GUEST_DEBUG

struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    int err;
};

static void kvm_invoke_set_guest_debug(void *data)
{
    struct kvm_set_guest_debug_data *dbg_data = data;

    if (cpu_single_env->kvm_cpu_state.regs_modified) {
        kvm_arch_put_registers(cpu_single_env);
        cpu_single_env->kvm_cpu_state.regs_modified = 0;
    }

    dbg_data->err =
        kvm_set_guest_debug(cpu_single_env,
                            &dbg_data->dbg);
}

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    struct kvm_set_guest_debug_data data;

    data.dbg.control = 0;
    if (env->singlestep_enabled)
        data.dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;

    kvm_arch_update_guest_debug(env, &data.dbg);
    data.dbg.control |= reinject_trap;

    on_vcpu(env, kvm_invoke_set_guest_debug, &data);
    return data.err;
}

#endif
/*
 * dirty pages logging
 */
/* FIXME: use unsigned long pointer instead of unsigned char */
unsigned char *kvm_dirty_bitmap = NULL;

int kvm_physical_memory_set_dirty_tracking(int enable)
{
    int r = 0;

    if (!kvm_enabled())
        return 0;

    if (enable) {
        if (!kvm_dirty_bitmap) {
            unsigned bitmap_size = BITMAP_SIZE(phys_ram_size);
            kvm_dirty_bitmap = qemu_malloc(bitmap_size);
            r = kvm_dirty_pages_log_enable_all(kvm_context);
        }
    } else {
        if (kvm_dirty_bitmap) {
            r = kvm_dirty_pages_log_reset(kvm_context);
            qemu_free(kvm_dirty_bitmap);
            kvm_dirty_bitmap = NULL;
        }
    }
    return r;
}
/* get kvm's dirty pages bitmap and update qemu's */
static int kvm_get_dirty_pages_log_range(unsigned long start_addr,
                                         unsigned char *bitmap,
                                         unsigned long offset,
                                         unsigned long mem_size)
{
    unsigned int i, j, n = 0;
    unsigned char c;
    unsigned long page_number, addr, addr1;
    ram_addr_t ram_addr;
    unsigned int len = ((mem_size / TARGET_PAGE_SIZE) + 7) / 8;

    /*
     * bitmap-traveling is faster than memory-traveling (for addr...)
     * especially when most of the memory is not dirty.
     */
    for (i = 0; i < len; i++) {
        c = bitmap[i];
        while (c > 0) {
            j = ffsl(c) - 1;
            c &= ~(1u << j);
            page_number = i * 8 + j;
            addr1 = page_number * TARGET_PAGE_SIZE;
            addr = offset + addr1;
            ram_addr = cpu_get_physical_page_desc(addr);
            cpu_physical_memory_set_dirty(ram_addr);
            n++;
        }
    }
    return 0;
}

static int kvm_get_dirty_bitmap_cb(unsigned long start, unsigned long len,
                                   void *bitmap, void *opaque)
{
    return kvm_get_dirty_pages_log_range(start, bitmap, start, len);
}

/*
 * get kvm's dirty pages bitmap and update qemu's
 * we only care about physical ram, which resides in slots 0 and 3
 */
int kvm_update_dirty_pages_log(void)
{
    int r = 0;

    r = kvm_get_dirty_pages_range(kvm_context, 0, -1UL, NULL,
                                  kvm_get_dirty_bitmap_cb);
    return r;
}
void kvm_qemu_log_memory(target_phys_addr_t start, target_phys_addr_t size,
                         int log)
{
    if (log)
        kvm_dirty_pages_log_enable_slot(kvm_context, start, size);
    else {
        if (must_use_aliases_target(start))
            return;
        kvm_dirty_pages_log_disable_slot(kvm_context, start, size);
    }
}

#ifdef KVM_CAP_IRQCHIP

int kvm_set_irq(int irq, int level, int *status)
{
    return kvm_set_irq_level(kvm_context, irq, level, status);
}

#endif

int qemu_kvm_get_dirty_pages(unsigned long phys_addr, void *buf)
{
    return kvm_get_dirty_pages(kvm_context, phys_addr, buf);
}
void kvm_mutex_unlock(void)
{
    assert(!cpu_single_env);
    pthread_mutex_unlock(&qemu_mutex);
}

void kvm_mutex_lock(void)
{
    pthread_mutex_lock(&qemu_mutex);
    cpu_single_env = NULL;
}

void qemu_mutex_unlock_iothread(void)
{
    if (kvm_enabled())
        kvm_mutex_unlock();
}

void qemu_mutex_lock_iothread(void)
{
    if (kvm_enabled())
        kvm_mutex_lock();
}

#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
void kvm_add_ioperm_data(struct ioperm_data *data)
{
    QLIST_INSERT_HEAD(&ioperm_head, data, entries);
}

void kvm_remove_ioperm_data(unsigned long start_port, unsigned long num)
{
    struct ioperm_data *data;

    data = QLIST_FIRST(&ioperm_head);
    while (data) {
        struct ioperm_data *next = QLIST_NEXT(data, entries);

        if (data->start_port == start_port && data->num == num) {
            QLIST_REMOVE(data, entries);
            qemu_free(data);
        }

        data = next;
    }
}

void kvm_ioperm(CPUState *env, void *data)
{
    if (kvm_enabled() && qemu_system_ready)
        on_vcpu(env, kvm_arch_do_ioperm, data);
}

#endif
int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    if (must_use_aliases_source(start_addr))
        return 0;

    kvm_get_dirty_pages_range(kvm_context, start_addr,
                              end_addr - start_addr, NULL,
                              kvm_get_dirty_bitmap_cb);
    return 0;
}

int kvm_log_start(target_phys_addr_t phys_addr, target_phys_addr_t len)
{
    if (must_use_aliases_source(phys_addr))
        return 0;

    kvm_qemu_log_memory(phys_addr, len, 1);
    return 0;
}

int kvm_log_stop(target_phys_addr_t phys_addr, target_phys_addr_t len)
{
    if (must_use_aliases_source(phys_addr))
        return 0;

    kvm_qemu_log_memory(phys_addr, len, 0);
    return 0;
}

int kvm_set_boot_cpu_id(uint32_t id)
{
    return kvm_set_boot_vcpu_id(kvm_context, id);
}
#ifdef KVM_CAP_MCE
struct kvm_x86_mce_data {
    CPUState *env;
    struct kvm_x86_mce *mce;
    int abort_on_error;
};

static void kvm_do_inject_x86_mce(void *_data)
{
    struct kvm_x86_mce_data *data = _data;
    int r;

    r = kvm_set_mce(data->env, data->mce);
    if (r < 0) {
        perror("kvm_set_mce FAILED");
        if (data->abort_on_error)
            abort();
    }
}
#endif

void kvm_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc,
                        int abort_on_error)
{
#ifdef KVM_CAP_MCE
    struct kvm_x86_mce mce = {
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
    };
    struct kvm_x86_mce_data data = {
        .env = cenv,
        .mce = &mce,
        .abort_on_error = abort_on_error,
    };

    if (!cenv->mcg_cap) {
        fprintf(stderr, "MCE support is not enabled!\n");
        return;
    }
    on_vcpu(cenv, kvm_do_inject_x86_mce, &data);
#else
    if (abort_on_error)
        abort();
#endif
}