/*
 * Copyright (C) 2006-2008 Qumranet Technologies
 *
 * Licensed under the terms of the GNU GPL version 2 or higher.
 */
#include "config-host.h"
#include "qemu-common.h"

#include <sys/utsname.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef BUS_MCEERR_AR
#define BUS_MCEERR_AR 4
#endif
#ifndef BUS_MCEERR_AO
#define BUS_MCEERR_AO 5
#endif

#define EXPECTED_KVM_API_VERSION 12

#if EXPECTED_KVM_API_VERSION != KVM_API_VERSION
#error libkvm: userspace and kernel version mismatch
#endif
int kvm_pit_reinject = 1;

kvm_context_t kvm_context;

pthread_mutex_t qemu_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t qemu_vcpu_cond = PTHREAD_COND_INITIALIZER;
pthread_cond_t qemu_system_cond = PTHREAD_COND_INITIALIZER;
pthread_cond_t qemu_pause_cond = PTHREAD_COND_INITIALIZER;
pthread_cond_t qemu_work_cond = PTHREAD_COND_INITIALIZER;
__thread CPUState *current_env;

static int qemu_system_ready;

#define SIG_IPI (SIGRTMIN+4)

static int io_thread_sigfd = -1;

static CPUState *kvm_debug_cpu_requested;
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
/* The list of ioperm_data */
static QLIST_HEAD(, ioperm_data) ioperm_head;
#endif
#define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))

int kvm_abi = EXPECTED_KVM_API_VERSION;
int kvm_page_size;
#ifdef KVM_CAP_SET_GUEST_DEBUG
static int kvm_debug(CPUState *env,
                     struct kvm_debug_exit_arch *arch_info)
{
    int handle = kvm_arch_debug(arch_info);

    if (handle) {
        kvm_debug_cpu_requested = env;
        env->stopped = 1;
    }
    return handle;
}
#endif
static int handle_unhandled(uint64_t reason)
{
    fprintf(stderr, "kvm: unhandled exit %" PRIx64 "\n", reason);
    return -EINVAL;
}
#define VMX_INVALID_GUEST_STATE 0x80000021
static int handle_failed_vmentry(uint64_t reason)
{
    fprintf(stderr, "kvm: vm entry failed with error 0x%" PRIx64 "\n\n", reason);

    /* Perhaps we will need to check if this machine is Intel, since exit
       reason 0x21 has a different interpretation on SVM */
    if (reason == VMX_INVALID_GUEST_STATE) {
        fprintf(stderr, "If you're running a guest on an Intel machine without\n");
        fprintf(stderr, "unrestricted mode support, the failure is most likely\n");
        fprintf(stderr, "due to the guest entering an invalid state for Intel VT.\n");
        fprintf(stderr, "For example, the guest may be running in big real mode\n");
        fprintf(stderr, "which is not supported on less recent Intel processors.\n\n");
    }

    return -EINVAL;
}
static inline void set_gsi(kvm_context_t kvm, unsigned int gsi)
{
    uint32_t *bitmap = kvm->used_gsi_bitmap;

    if (gsi < kvm->max_gsi)
        bitmap[gsi / 32] |= 1U << (gsi % 32);
    else
        DPRINTF("Invalid GSI %u\n", gsi);
}
static inline void clear_gsi(kvm_context_t kvm, unsigned int gsi)
{
    uint32_t *bitmap = kvm->used_gsi_bitmap;

    if (gsi < kvm->max_gsi)
        bitmap[gsi / 32] &= ~(1U << (gsi % 32));
    else
        DPRINTF("Invalid GSI %u\n", gsi);
}
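/*
 * Note: used_gsi_bitmap is the allocator state for global system interrupts.
 * set_gsi()/clear_gsi() mark a GSI as in use or free, and
 * kvm_get_irq_route_gsi() below finds the lowest free GSI with ffs(), which
 * is why kvm_init() rounds max_gsi up to a whole number of 32-bit words.
 */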
static int kvm_create_context(void);
int kvm_init(int smp_cpus)
{
    int fd;
    int r, gsi_count, i;

    fd = open("/dev/kvm", O_RDWR);
    if (fd == -1) {
        perror("open /dev/kvm");
        return -1;
    }
    r = ioctl(fd, KVM_GET_API_VERSION, 0);
    if (r == -1) {
        fprintf(stderr,
                "kvm kernel version too old: "
                "KVM_GET_API_VERSION ioctl not supported\n");
        goto out_close;
    }
    if (r < EXPECTED_KVM_API_VERSION) {
        fprintf(stderr, "kvm kernel version too old: "
                "We expect API version %d or newer, but got "
                "version %d\n", EXPECTED_KVM_API_VERSION, r);
        goto out_close;
    }
    if (r > EXPECTED_KVM_API_VERSION) {
        fprintf(stderr, "kvm userspace version too old\n");
        goto out_close;
    }
    kvm_abi = r;
    kvm_page_size = getpagesize();
    kvm_state = qemu_mallocz(sizeof(*kvm_state));
    kvm_context = &kvm_state->kvm_context;

    kvm_state->fd = fd;
    kvm_state->vmfd = -1;
    kvm_context->opaque = cpu_single_env;
    kvm_context->dirty_pages_log_all = 0;
    kvm_context->no_irqchip_creation = 0;
    kvm_context->no_pit_creation = 0;

#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_INIT(&kvm_state->kvm_sw_breakpoints);
#endif

    gsi_count = kvm_get_gsi_count(kvm_context);
    if (gsi_count > 0) {
        int gsi_bits;

        /* Round up so we can search ints using ffs */
        gsi_bits = ALIGN(gsi_count, 32);
        kvm_context->used_gsi_bitmap = qemu_mallocz(gsi_bits / 8);
        kvm_context->max_gsi = gsi_bits;

        /* Mark any over-allocated bits as already in use */
        for (i = gsi_count; i < gsi_bits; i++) {
            set_gsi(kvm_context, i);
        }
    }

    kvm_cpu_register_phys_memory_client();

    pthread_mutex_lock(&qemu_mutex);
    return kvm_create_context();

out_close:
    close(fd);
    return -1;
}
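/*
 * Initialization order above: open /dev/kvm, verify the API version in both
 * directions (an older kernel and an older userspace are each rejected),
 * allocate kvm_state, size the GSI bitmap, and finally enter
 * kvm_create_context() with qemu_mutex deliberately held.
 */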
static void kvm_finalize(KVMState *s)
{
    /* FIXME
    if (kvm->vcpu_fd[0] != -1)
        close(kvm->vcpu_fd[0]);
    if (kvm->vm_fd != -1)
        close(kvm->vm_fd);
    */
    close(s->fd);
    free(s);
}
void kvm_disable_irqchip_creation(kvm_context_t kvm)
{
    kvm->no_irqchip_creation = 1;
}

void kvm_disable_pit_creation(kvm_context_t kvm)
{
    kvm->no_pit_creation = 1;
}

static void kvm_reset_vcpu(void *opaque)
{
    CPUState *env = opaque;

    kvm_arch_cpu_reset(env);
}
static void kvm_create_vcpu(CPUState *env, int id)
{
    long mmap_size;
    int r;
    KVMState *s = kvm_state;

    r = kvm_vm_ioctl(kvm_state, KVM_CREATE_VCPU, id);
    if (r < 0) {
        fprintf(stderr, "kvm_create_vcpu: %m\n");
        fprintf(stderr, "Failed to create vCPU. Check the -smp parameter.\n");
        goto err;
    }

    env->kvm_fd = r;
    env->kvm_state = kvm_state;

    mmap_size = kvm_ioctl(kvm_state, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        fprintf(stderr, "get vcpu mmap size: %m\n");
        goto err_fd;
    }
    env->kvm_run =
        mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, env->kvm_fd,
             0);
    if (env->kvm_run == MAP_FAILED) {
        fprintf(stderr, "mmap vcpu area: %m\n");
        goto err_fd;
    }

#ifdef KVM_CAP_COALESCED_MMIO
    if (s->coalesced_mmio && !s->coalesced_mmio_ring)
        s->coalesced_mmio_ring = (void *) env->kvm_run +
            s->coalesced_mmio * PAGE_SIZE;
#endif

    r = kvm_arch_init_vcpu(env);
    if (r == 0) {
        qemu_register_reset(kvm_reset_vcpu, env);
    }

    return;
err_fd:
    close(env->kvm_fd);
err:
    /* We're no good with semi-broken states. */
    abort();
}
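/*
 * The mmap()ed kvm_run area is the per-vcpu mailbox shared with the kernel:
 * exit reasons and in/out data are exchanged through it around each KVM_RUN
 * ioctl.  When coalesced MMIO is supported, its ring lives in the same
 * mapping, coalesced_mmio pages past the start of kvm_run.
 */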
static int kvm_set_boot_vcpu_id(kvm_context_t kvm, uint32_t id)
{
#ifdef KVM_CAP_SET_BOOT_CPU_ID
    int r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_SET_BOOT_CPU_ID);

    if (r > 0) {
        return kvm_vm_ioctl(kvm_state, KVM_SET_BOOT_CPU_ID, id);
    }
    return -ENOSYS;
#else
    return -ENOSYS;
#endif
}
int kvm_create_vm(kvm_context_t kvm)
{
    int fd;

#ifdef KVM_CAP_IRQ_ROUTING
    kvm->irq_routes = qemu_mallocz(sizeof(*kvm->irq_routes));
    kvm->nr_allocated_irq_routes = 0;
#endif

    fd = kvm_ioctl(kvm_state, KVM_CREATE_VM, 0);
    if (fd < 0) {
        fprintf(stderr, "kvm_create_vm: %m\n");
        return -1;
    }
    kvm_state->vmfd = fd;

    return 0;
}
static int kvm_create_default_phys_mem(kvm_context_t kvm,
                                       unsigned long phys_mem_bytes,
                                       void **vm_mem)
{
#ifdef KVM_CAP_USER_MEMORY
    int r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY);

    if (r > 0) {
        return 0;
    }
    fprintf(stderr,
            "Hypervisor too old: KVM_CAP_USER_MEMORY extension not supported\n");
#else
#error Hypervisor too old: KVM_CAP_USER_MEMORY extension not supported
#endif
    return -1;
}
void kvm_create_irqchip(kvm_context_t kvm)
{
    int r;

    kvm->irqchip_in_kernel = 0;
#ifdef KVM_CAP_IRQCHIP
    if (!kvm->no_irqchip_creation) {
        r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_IRQCHIP);
        if (r > 0) {            /* kernel irqchip supported */
            r = kvm_vm_ioctl(kvm_state, KVM_CREATE_IRQCHIP);
            if (r >= 0) {
                kvm->irqchip_inject_ioctl = KVM_IRQ_LINE;
#if defined(KVM_CAP_IRQ_INJECT_STATUS) && defined(KVM_IRQ_LINE_STATUS)
                r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION,
                              KVM_CAP_IRQ_INJECT_STATUS);
                if (r > 0) {
                    kvm->irqchip_inject_ioctl = KVM_IRQ_LINE_STATUS;
                }
#endif
                kvm->irqchip_in_kernel = 1;
            } else
                fprintf(stderr, "Create kernel PIC irqchip failed\n");
        }
    }
#endif
    kvm_state->irqchip_in_kernel = kvm->irqchip_in_kernel;
}
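/*
 * If KVM_CAP_IRQ_INJECT_STATUS is present, irqchip_inject_ioctl is switched
 * to KVM_IRQ_LINE_STATUS so that kvm_set_irq_level() can report whether an
 * injected interrupt was actually delivered or coalesced; with plain
 * KVM_IRQ_LINE, delivery is simply assumed.
 */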
int kvm_create(kvm_context_t kvm, unsigned long phys_mem_bytes, void **vm_mem)
{
    int r, i;

    r = kvm_create_vm(kvm);
    if (r < 0) {
        return r;
    }
    r = kvm_arch_create(kvm, phys_mem_bytes, vm_mem);
    if (r < 0) {
        return r;
    }
    for (i = 0; i < ARRAY_SIZE(kvm_state->slots); i++) {
        kvm_state->slots[i].slot = i;
    }

    r = kvm_create_default_phys_mem(kvm, phys_mem_bytes, vm_mem);
    if (r < 0) {
        return r;
    }

    kvm_create_irqchip(kvm);

    return 0;
}
#ifdef KVM_CAP_IRQCHIP

int kvm_set_irq_level(kvm_context_t kvm, int irq, int level, int *status)
{
    struct kvm_irq_level event;
    int r;

    if (!kvm->irqchip_in_kernel) {
        return 0;
    }
    event.level = level;
    event.irq = irq;
    r = kvm_vm_ioctl(kvm_state, kvm->irqchip_inject_ioctl, &event);
    if (r < 0) {
        perror("kvm_set_irq_level");
    }

    if (status) {
#ifdef KVM_CAP_IRQ_INJECT_STATUS
        *status =
            (kvm->irqchip_inject_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
#else
        *status = 1;
#endif
    }

    return 1;
}
int kvm_get_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip)
{
    int r;

    if (!kvm->irqchip_in_kernel) {
        return 0;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_GET_IRQCHIP, chip);
    if (r < 0) {
        perror("kvm_get_irqchip");
    }
    return r;
}

int kvm_set_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip)
{
    int r;

    if (!kvm->irqchip_in_kernel) {
        return 0;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_SET_IRQCHIP, chip);
    if (r < 0) {
        perror("kvm_set_irqchip");
    }
    return r;
}

#endif
static int handle_debug(CPUState *env)
{
#ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_run *run = env->kvm_run;

    return kvm_debug(env, &run->debug.arch);
#else
    return 0;
#endif
}
int kvm_get_regs(CPUState *env, struct kvm_regs *regs)
{
    return kvm_vcpu_ioctl(env, KVM_GET_REGS, regs);
}

int kvm_set_regs(CPUState *env, struct kvm_regs *regs)
{
    return kvm_vcpu_ioctl(env, KVM_SET_REGS, regs);
}

int kvm_get_fpu(CPUState *env, struct kvm_fpu *fpu)
{
    return kvm_vcpu_ioctl(env, KVM_GET_FPU, fpu);
}

int kvm_set_fpu(CPUState *env, struct kvm_fpu *fpu)
{
    return kvm_vcpu_ioctl(env, KVM_SET_FPU, fpu);
}

int kvm_get_sregs(CPUState *env, struct kvm_sregs *sregs)
{
    return kvm_vcpu_ioctl(env, KVM_GET_SREGS, sregs);
}

int kvm_set_sregs(CPUState *env, struct kvm_sregs *sregs)
{
    return kvm_vcpu_ioctl(env, KVM_SET_SREGS, sregs);
}

#ifdef KVM_CAP_MP_STATE
int kvm_get_mpstate(CPUState *env, struct kvm_mp_state *mp_state)
{
    int r;

    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_MP_STATE);
    if (r > 0) {
        return kvm_vcpu_ioctl(env, KVM_GET_MP_STATE, mp_state);
    }
    return -ENOSYS;
}

int kvm_set_mpstate(CPUState *env, struct kvm_mp_state *mp_state)
{
    int r;

    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_MP_STATE);
    if (r > 0) {
        return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, mp_state);
    }
    return -ENOSYS;
}
#endif

#ifdef KVM_CAP_XSAVE
int kvm_get_xsave(CPUState *env, struct kvm_xsave *xsave)
{
    return kvm_vcpu_ioctl(env, KVM_GET_XSAVE, xsave);
}

int kvm_set_xsave(CPUState *env, struct kvm_xsave *xsave)
{
    return kvm_vcpu_ioctl(env, KVM_SET_XSAVE, xsave);
}
#endif

#ifdef KVM_CAP_XCRS
int kvm_get_xcrs(CPUState *env, struct kvm_xcrs *xcrs)
{
    return kvm_vcpu_ioctl(env, KVM_GET_XCRS, xcrs);
}

int kvm_set_xcrs(CPUState *env, struct kvm_xcrs *xcrs)
{
    return kvm_vcpu_ioctl(env, KVM_SET_XCRS, xcrs);
}
#endif
static int handle_mmio(CPUState *env)
{
    unsigned long addr = env->kvm_run->mmio.phys_addr;
    struct kvm_run *kvm_run = env->kvm_run;
    void *data = kvm_run->mmio.data;

    /* hack: Red Hat 7.1 generates these weird accesses. */
    if ((addr > 0xa0000 - 4 && addr <= 0xa0000) && kvm_run->mmio.len == 3) {
        return 0;
    }

    cpu_physical_memory_rw(addr, data, kvm_run->mmio.len, kvm_run->mmio.is_write);
    return 0;
}
int handle_io_window(kvm_context_t kvm)
{
    return 1;
}

int handle_shutdown(kvm_context_t kvm, CPUState *env)
{
    /* stop the current vcpu from going back to guest mode */
    env->stopped = 1;

    qemu_system_reset_request();
    return 1;
}

static inline void push_nmi(kvm_context_t kvm)
{
#ifdef KVM_CAP_USER_NMI
    kvm_arch_push_nmi(kvm->opaque);
#endif /* KVM_CAP_USER_NMI */
}

void post_kvm_run(kvm_context_t kvm, CPUState *env)
{
    pthread_mutex_lock(&qemu_mutex);
    kvm_arch_post_run(env, env->kvm_run);
    cpu_single_env = env;
}

int pre_kvm_run(kvm_context_t kvm, CPUState *env)
{
    kvm_arch_pre_run(env, env->kvm_run);

    pthread_mutex_unlock(&qemu_mutex);
    return 0;
}

int kvm_is_ready_for_interrupt_injection(CPUState *env)
{
    return env->kvm_run->ready_for_interrupt_injection;
}
int kvm_run(CPUState *env)
{
    int r;
    kvm_context_t kvm = &env->kvm_state->kvm_context;
    struct kvm_run *run = env->kvm_run;
    int fd = env->kvm_fd;

again:
    if (env->kvm_vcpu_dirty) {
        kvm_arch_load_regs(env, KVM_PUT_RUNTIME_STATE);
        env->kvm_vcpu_dirty = 0;
    }
    push_nmi(kvm);
#if !defined(__s390__)
    if (!kvm->irqchip_in_kernel) {
        run->request_interrupt_window = kvm_arch_try_push_interrupts(env);
    }
#endif

    r = pre_kvm_run(kvm, env);
    if (r) {
        return r;
    }
    if (env->exit_request) {
        env->exit_request = 0;
        pthread_kill(env->kvm_cpu_state.thread, SIG_IPI);
    }
    r = ioctl(fd, KVM_RUN, 0);

    if (r == -1 && errno != EINTR && errno != EAGAIN) {
        r = -errno;
        post_kvm_run(kvm, env);
        fprintf(stderr, "kvm_run: %s\n", strerror(-r));
        return r;
    }

    post_kvm_run(kvm, env);

    kvm_flush_coalesced_mmio_buffer();

#if !defined(__s390__)
    if (r == -1) {
        r = handle_io_window(kvm);
        goto more;
    }
#endif
    switch (run->exit_reason) {
    case KVM_EXIT_UNKNOWN:
        r = handle_unhandled(run->hw.hardware_exit_reason);
        break;
    case KVM_EXIT_FAIL_ENTRY:
        r = handle_failed_vmentry(run->fail_entry.hardware_entry_failure_reason);
        break;
    case KVM_EXIT_EXCEPTION:
        fprintf(stderr, "exception %d (%x)\n", run->ex.exception,
                run->ex.error_code);
        kvm_show_regs(env);
        kvm_show_code(env);
        abort();
        break;
    case KVM_EXIT_IO:
        r = kvm_handle_io(run->io.port,
                          (uint8_t *)run + run->io.data_offset,
                          run->io.direction,
                          run->io.size,
                          run->io.count);
        r = 0;
        break;
    case KVM_EXIT_DEBUG:
        r = handle_debug(env);
        break;
    case KVM_EXIT_MMIO:
        r = handle_mmio(env);
        break;
    case KVM_EXIT_HLT:
        r = kvm_arch_halt(env);
        break;
    case KVM_EXIT_IRQ_WINDOW_OPEN:
        break;
    case KVM_EXIT_SHUTDOWN:
        r = handle_shutdown(kvm, env);
        break;
#if defined(__s390__)
    case KVM_EXIT_S390_SIEIC:
        r = kvm_s390_handle_intercept(kvm, env, run);
        break;
    case KVM_EXIT_S390_RESET:
        r = kvm_s390_handle_reset(kvm, env, run);
        break;
#endif
    case KVM_EXIT_INTERNAL_ERROR:
        kvm_handle_internal_error(env, run);
        r = 1;
        break;
    default:
        if (kvm_arch_run(env)) {
            fprintf(stderr, "unhandled vm exit: 0x%x\n", run->exit_reason);
            kvm_show_regs(env);
            abort();
        }
        r = 1;
        break;
    }

more:
    if (!r) {
        goto again;
    }
    return r;
}
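/*
 * Control flow: exit handlers leave r == 0 to reenter the guest right away
 * (the "goto again" path) and r != 0 to return to kvm_main_loop_cpu(), e.g.
 * so a pending stop, reset or debug request can be serviced.
 */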
int kvm_inject_irq(CPUState *env, unsigned irq)
{
    struct kvm_interrupt intr;

    intr.irq = irq;
    return kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
}

int kvm_inject_nmi(CPUState *env)
{
#ifdef KVM_CAP_USER_NMI
    return kvm_vcpu_ioctl(env, KVM_NMI);
#else
    return -ENOSYS;
#endif
}

int kvm_init_coalesced_mmio(kvm_context_t kvm)
{
    int r = 0;

    kvm_state->coalesced_mmio = 0;
#ifdef KVM_CAP_COALESCED_MMIO
    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_MMIO);
    if (r > 0) {
        kvm_state->coalesced_mmio = r;
        return 0;
    }
#endif
    return r;
}
#ifdef KVM_CAP_DEVICE_ASSIGNMENT
int kvm_assign_pci_device(kvm_context_t kvm,
                          struct kvm_assigned_pci_dev *assigned_dev)
{
    return kvm_vm_ioctl(kvm_state, KVM_ASSIGN_PCI_DEVICE, assigned_dev);
}

static int kvm_old_assign_irq(kvm_context_t kvm,
                              struct kvm_assigned_irq *assigned_irq)
{
    return kvm_vm_ioctl(kvm_state, KVM_ASSIGN_IRQ, assigned_irq);
}

#ifdef KVM_CAP_ASSIGN_DEV_IRQ
int kvm_assign_irq(kvm_context_t kvm, struct kvm_assigned_irq *assigned_irq)
{
    int ret;

    ret = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_ASSIGN_DEV_IRQ);
    if (ret > 0) {
        return kvm_vm_ioctl(kvm_state, KVM_ASSIGN_DEV_IRQ, assigned_irq);
    }

    return kvm_old_assign_irq(kvm, assigned_irq);
}

int kvm_deassign_irq(kvm_context_t kvm, struct kvm_assigned_irq *assigned_irq)
{
    return kvm_vm_ioctl(kvm_state, KVM_DEASSIGN_DEV_IRQ, assigned_irq);
}
#else
int kvm_assign_irq(kvm_context_t kvm, struct kvm_assigned_irq *assigned_irq)
{
    return kvm_old_assign_irq(kvm, assigned_irq);
}
#endif
#endif

#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
int kvm_deassign_pci_device(kvm_context_t kvm,
                            struct kvm_assigned_pci_dev *assigned_dev)
{
    return kvm_vm_ioctl(kvm_state, KVM_DEASSIGN_PCI_DEVICE, assigned_dev);
}
#endif

int kvm_reinject_control(kvm_context_t kvm, int pit_reinject)
{
#ifdef KVM_CAP_REINJECT_CONTROL
    int r;
    struct kvm_reinject_control control;

    control.pit_reinject = pit_reinject;

    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_REINJECT_CONTROL);
    if (r > 0) {
        return kvm_vm_ioctl(kvm_state, KVM_REINJECT_CONTROL, &control);
    }
#endif
    return -ENOSYS;
}
int kvm_has_gsi_routing(kvm_context_t kvm)
{
    int r = 0;

#ifdef KVM_CAP_IRQ_ROUTING
    r = kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
#endif
    return r;
}

int kvm_get_gsi_count(kvm_context_t kvm)
{
#ifdef KVM_CAP_IRQ_ROUTING
    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
#else
    return -EINVAL;
#endif
}

int kvm_clear_gsi_routes(kvm_context_t kvm)
{
#ifdef KVM_CAP_IRQ_ROUTING
    kvm->irq_routes->nr = 0;
    return 0;
#else
    return -EINVAL;
#endif
}
int kvm_add_routing_entry(kvm_context_t kvm,
                          struct kvm_irq_routing_entry *entry)
{
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing *z;
    struct kvm_irq_routing_entry *new;
    int n, size;

    if (kvm->irq_routes->nr == kvm->nr_allocated_irq_routes) {
        n = kvm->nr_allocated_irq_routes * 2;
        if (n < 64) {
            n = 64;
        }
        size = sizeof(struct kvm_irq_routing);
        size += n * sizeof(*new);
        z = realloc(kvm->irq_routes, size);
        if (!z) {
            return -ENOMEM;
        }
        kvm->nr_allocated_irq_routes = n;
        kvm->irq_routes = z;
    }
    n = kvm->irq_routes->nr++;
    new = &kvm->irq_routes->entries[n];
    memset(new, 0, sizeof(*new));
    new->gsi = entry->gsi;
    new->type = entry->type;
    new->flags = entry->flags;
    new->u = entry->u;

    set_gsi(kvm, entry->gsi);

    return 0;
#else
    return -ENOSYS;
#endif
}
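/*
 * The route array grows geometrically (capacity doubles, starting at 64
 * entries) whenever nr catches up with nr_allocated_irq_routes, so repeated
 * kvm_add_routing_entry() calls need only amortized O(1) reallocations.
 */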
int kvm_add_irq_route(kvm_context_t kvm, int gsi, int irqchip, int pin)
{
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing_entry e;

    e.gsi = gsi;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    return kvm_add_routing_entry(kvm, &e);
#else
    return -ENOSYS;
#endif
}
int kvm_del_routing_entry(kvm_context_t kvm,
                          struct kvm_irq_routing_entry *entry)
{
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing_entry *e, *p;
    int i, gsi, found = 0;

    gsi = entry->gsi;

    for (i = 0; i < kvm->irq_routes->nr; ++i) {
        e = &kvm->irq_routes->entries[i];
        if (e->type == entry->type && e->gsi == gsi) {
            switch (e->type) {
            case KVM_IRQ_ROUTING_IRQCHIP:{
                    if (e->u.irqchip.irqchip ==
                        entry->u.irqchip.irqchip
                        && e->u.irqchip.pin == entry->u.irqchip.pin) {
                        p = &kvm->irq_routes->entries[--kvm->irq_routes->nr];
                        *e = *p;
                        found = 1;
                    }
                    break;
                }
            case KVM_IRQ_ROUTING_MSI:{
                    if (e->u.msi.address_lo ==
                        entry->u.msi.address_lo
                        && e->u.msi.address_hi ==
                        entry->u.msi.address_hi
                        && e->u.msi.data == entry->u.msi.data) {
                        p = &kvm->irq_routes->entries[--kvm->irq_routes->nr];
                        *e = *p;
                        found = 1;
                    }
                    break;
                }
            default:
                break;
            }
            if (found) {
                /* If there are no other users of this GSI
                 * mark it available in the bitmap */
                for (i = 0; i < kvm->irq_routes->nr; i++) {
                    e = &kvm->irq_routes->entries[i];
                    if (e->gsi == gsi)
                        break;
                }
                if (i == kvm->irq_routes->nr) {
                    clear_gsi(kvm, gsi);
                }

                return 0;
            }
        }
    }
    return -ESRCH;
#else
    return -ENOSYS;
#endif
}
int kvm_update_routing_entry(kvm_context_t kvm,
                             struct kvm_irq_routing_entry *entry,
                             struct kvm_irq_routing_entry *newentry)
{
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing_entry *e;
    int i;

    if (entry->gsi != newentry->gsi || entry->type != newentry->type) {
        return -EINVAL;
    }

    for (i = 0; i < kvm->irq_routes->nr; ++i) {
        e = &kvm->irq_routes->entries[i];
        if (e->type != entry->type || e->gsi != entry->gsi) {
            continue;
        }
        switch (e->type) {
        case KVM_IRQ_ROUTING_IRQCHIP:
            if (e->u.irqchip.irqchip == entry->u.irqchip.irqchip &&
                e->u.irqchip.pin == entry->u.irqchip.pin) {
                memcpy(&e->u.irqchip, &newentry->u.irqchip,
                       sizeof e->u.irqchip);
                return 0;
            }
            break;
        case KVM_IRQ_ROUTING_MSI:
            if (e->u.msi.address_lo == entry->u.msi.address_lo &&
                e->u.msi.address_hi == entry->u.msi.address_hi &&
                e->u.msi.data == entry->u.msi.data) {
                memcpy(&e->u.msi, &newentry->u.msi, sizeof e->u.msi);
                return 0;
            }
            break;
        default:
            break;
        }
    }
    return -ESRCH;
#else
    return -ENOSYS;
#endif
}
int kvm_del_irq_route(kvm_context_t kvm, int gsi, int irqchip, int pin)
{
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing_entry e;

    e.gsi = gsi;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    return kvm_del_routing_entry(kvm, &e);
#else
    return -ENOSYS;
#endif
}

int kvm_commit_irq_routes(kvm_context_t kvm)
{
#ifdef KVM_CAP_IRQ_ROUTING
    kvm->irq_routes->flags = 0;
    return kvm_vm_ioctl(kvm_state, KVM_SET_GSI_ROUTING, kvm->irq_routes);
#else
    return -ENOSYS;
#endif
}

int kvm_get_irq_route_gsi(kvm_context_t kvm)
{
    int i, bit;
    uint32_t *buf = kvm->used_gsi_bitmap;

    /* Return the lowest unused GSI in the bitmap */
    for (i = 0; i < kvm->max_gsi / 32; i++) {
        bit = ffs(~buf[i]);
        if (!bit) {
            continue;
        }

        return bit - 1 + i * 32;
    }

    return -ENOSPC;
}
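/*
 * Worked example: with buf[0] == 0xffffffff (GSIs 0-31 busy) and
 * buf[1] == 0x0000000f (GSIs 32-35 busy), the i == 1 pass computes
 * ffs(~buf[1]) == ffs(0xfffffff0) == 5 and returns 5 - 1 + 1 * 32 == 36,
 * the lowest free GSI.
 */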
#ifdef KVM_CAP_DEVICE_MSIX
int kvm_assign_set_msix_nr(kvm_context_t kvm,
                           struct kvm_assigned_msix_nr *msix_nr)
{
    return kvm_vm_ioctl(kvm_state, KVM_ASSIGN_SET_MSIX_NR, msix_nr);
}

int kvm_assign_set_msix_entry(kvm_context_t kvm,
                              struct kvm_assigned_msix_entry *entry)
{
    return kvm_vm_ioctl(kvm_state, KVM_ASSIGN_SET_MSIX_ENTRY, entry);
}
#endif

#if defined(KVM_CAP_IRQFD) && defined(CONFIG_EVENTFD)

#include <sys/eventfd.h>

static int _kvm_irqfd(kvm_context_t kvm, int fd, int gsi, int flags)
{
    struct kvm_irqfd data = {
        .fd = fd,
        .gsi = gsi,
        .flags = flags,
    };

    return kvm_vm_ioctl(kvm_state, KVM_IRQFD, &data);
}

int kvm_irqfd(kvm_context_t kvm, int gsi, int flags)
{
    int r;
    int fd;

    if (!kvm_check_extension(kvm_state, KVM_CAP_IRQFD))
        return -ENOENT;

    fd = eventfd(0, 0);
    if (fd < 0) {
        return -errno;
    }

    r = _kvm_irqfd(kvm, fd, gsi, 0);
    if (r < 0) {
        close(fd);
        return -errno;
    }

    return fd;
}

#else /* KVM_CAP_IRQFD */

int kvm_irqfd(kvm_context_t kvm, int gsi, int flags)
{
    return -ENOSYS;
}

#endif /* KVM_CAP_IRQFD */
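/*
 * Usage note: on success kvm_irqfd() hands back the eventfd descriptor it
 * bound to the GSI.  Per the KVM irqfd contract, any nonzero counter write
 * to that fd makes the kernel raise the interrupt without a round trip
 * through userspace.
 */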
unsigned long kvm_get_thread_id(void)
{
    return syscall(SYS_gettid);
}
static void qemu_cond_wait(pthread_cond_t *cond)
{
    CPUState *env = cpu_single_env;

    pthread_cond_wait(cond, &qemu_mutex);
    cpu_single_env = env;
}

static void sig_ipi_handler(int n)
{
}

static void hardware_memory_error(void)
{
    fprintf(stderr, "Hardware memory error!\n");
    exit(1);
}
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!");
    abort();
}
static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
#if defined(KVM_CAP_MCE) && defined(TARGET_I386)
    if (first_cpu->mcg_cap && siginfo->ssi_addr
        && siginfo->ssi_code == BUS_MCEERR_AO) {
        uint64_t status;
        void *vaddr;
        ram_addr_t ram_addr;
        unsigned long paddr;
        CPUState *cenv;

        /* Hope we are lucky for AO MCE */
        vaddr = (void *)(intptr_t)siginfo->ssi_addr;
        if (do_qemu_ram_addr_from_host(vaddr, &ram_addr) ||
            !kvm_physical_memory_addr_from_ram(kvm_state, ram_addr, &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!: %llx\n",
                    (unsigned long long)siginfo->ssi_addr);
            return;
        }
        status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
            | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
            | 0xc0;
        kvm_inject_x86_mce(first_cpu, 9, status,
                           MCG_STATUS_MCIP | MCG_STATUS_RIPV, paddr,
                           (MCM_ADDR_PHYS << 6) | 0xc, 1);
        for (cenv = first_cpu->next_cpu; cenv != NULL; cenv = cenv->next_cpu) {
            kvm_inject_x86_mce(cenv, 1, MCI_STATUS_VAL | MCI_STATUS_UC,
                               MCG_STATUS_MCIP | MCG_STATUS_RIPV, 0, 0, 1);
        }
    } else
#endif
    {
        if (siginfo->ssi_code == BUS_MCEERR_AO) {
            return;
        } else if (siginfo->ssi_code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            sigbus_reraise();
        }
    }
}
static void on_vcpu(CPUState *env, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (env == current_env) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    if (!env->kvm_cpu_state.queued_work_first) {
        env->kvm_cpu_state.queued_work_first = &wi;
    } else {
        env->kvm_cpu_state.queued_work_last->next = &wi;
    }
    env->kvm_cpu_state.queued_work_last = &wi;
    wi.next = NULL;
    wi.done = 0;

    pthread_kill(env->kvm_cpu_state.thread, SIG_IPI);
    while (!wi.done) {
        qemu_cond_wait(&qemu_work_cond);
    }
}
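/*
 * on_vcpu() is the cross-thread call primitive: a work item is linked into
 * the target cpu's queue, the target is kicked out of KVM_RUN with SIG_IPI,
 * and the caller sleeps on qemu_work_cond until flush_queued_work() on the
 * target thread marks the item done.
 */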
static void do_kvm_cpu_synchronize_state(void *_env)
{
    CPUState *env = _env;

    if (!env->kvm_vcpu_dirty) {
        kvm_arch_save_regs(env);
        env->kvm_vcpu_dirty = 1;
    }
}

void kvm_cpu_synchronize_state(CPUState *env)
{
    if (!env->kvm_vcpu_dirty) {
        on_vcpu(env, do_kvm_cpu_synchronize_state, env);
    }
}

void kvm_cpu_synchronize_post_reset(CPUState *env)
{
    kvm_arch_load_regs(env, KVM_PUT_RESET_STATE);
    env->kvm_vcpu_dirty = 0;
}

void kvm_cpu_synchronize_post_init(CPUState *env)
{
    kvm_arch_load_regs(env, KVM_PUT_FULL_STATE);
    env->kvm_vcpu_dirty = 0;
}

static void inject_interrupt(void *data)
{
    cpu_interrupt(current_env, (long) data);
}

void kvm_inject_interrupt(CPUState *env, int mask)
{
    on_vcpu(env, inject_interrupt, (void *) (long) mask);
}
void kvm_update_interrupt_request(CPUState *env)
{
    int signal = 0;

    if (env) {
        if (!current_env || !current_env->created) {
            signal = 1;
        }
        /*
         * Testing for created here is really redundant
         */
        if (current_env && current_env->created &&
            env != current_env && !env->kvm_cpu_state.signalled) {
            signal = 1;
        }

        if (signal) {
            env->kvm_cpu_state.signalled = 1;
            if (env->kvm_cpu_state.thread) {
                pthread_kill(env->kvm_cpu_state.thread, SIG_IPI);
            }
        }
    }
}
int kvm_cpu_exec(CPUState *env)
{
    int r;

    r = kvm_run(env);
    if (r < 0) {
        printf("kvm_run returned %d\n", r);
        vm_stop(0);
    }

    return 0;
}

int kvm_cpu_is_stopped(CPUState *env)
{
    return !vm_running || env->stopped;
}
static void flush_queued_work(CPUState *env)
{
    struct qemu_work_item *wi;

    if (!env->kvm_cpu_state.queued_work_first) {
        return;
    }

    while ((wi = env->kvm_cpu_state.queued_work_first)) {
        env->kvm_cpu_state.queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = 1;
    }
    env->kvm_cpu_state.queued_work_last = NULL;
    pthread_cond_broadcast(&qemu_work_cond);
}
#if defined(KVM_CAP_MCE) && defined(TARGET_I386)
static int kvm_mce_in_exception(CPUState *env)
{
    struct kvm_msr_entry msr_mcg_status = {
        .index = MSR_MCG_STATUS,
    };
    int r;

    r = kvm_get_msrs(env, &msr_mcg_status, 1);
    if (r == -1 || r == 0) {
        return -1;
    }
    return !!(msr_mcg_status.data & MCG_STATUS_MCIP);
}
#endif
static void kvm_on_sigbus(CPUState *env, siginfo_t *siginfo)
{
#if defined(KVM_CAP_MCE) && defined(TARGET_I386)
    struct kvm_x86_mce mce = {
        .bank = 9,
    };
    void *vaddr;
    ram_addr_t ram_addr;
    unsigned long paddr;
    int r;

    if (env->mcg_cap && siginfo->si_addr
        && (siginfo->si_code == BUS_MCEERR_AR
            || siginfo->si_code == BUS_MCEERR_AO)) {
        if (siginfo->si_code == BUS_MCEERR_AR) {
            /* Fake an Intel architectural Data Load SRAR UCR */
            mce.status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
                | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
                | MCI_STATUS_AR | 0x134;
            mce.misc = (MCM_ADDR_PHYS << 6) | 0xc;
            mce.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_EIPV;
        } else {
            /*
             * If there is an MCE exception being processed, ignore
             * this SRAO MCE
             */
            r = kvm_mce_in_exception(env);
            if (r == -1) {
                fprintf(stderr, "Failed to get MCE status\n");
            } else if (r) {
                return;
            }
            /* Fake an Intel architectural Memory scrubbing UCR */
            mce.status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
                | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
                | 0xc0;
            mce.misc = (MCM_ADDR_PHYS << 6) | 0xc;
            mce.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        }
        vaddr = (void *)siginfo->si_addr;
        if (do_qemu_ram_addr_from_host(vaddr, &ram_addr) ||
            !kvm_physical_memory_addr_from_ram(kvm_state, ram_addr, &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!\n");
            /* Hope we are lucky for AO MCE */
            if (siginfo->si_code == BUS_MCEERR_AO) {
                return;
            } else {
                hardware_memory_error();
            }
        }
        mce.addr = paddr;
        r = kvm_set_mce(env, &mce);
        if (r < 0) {
            fprintf(stderr, "kvm_set_mce: %s\n", strerror(errno));
            abort();
        }
    } else
#endif
    {
        if (siginfo->si_code == BUS_MCEERR_AO) {
            return;
        } else if (siginfo->si_code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            sigbus_reraise();
        }
    }
}
static void kvm_main_loop_wait(CPUState *env, int timeout)
{
    struct timespec ts;
    int r, e;
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;

    ts.tv_sec = timeout / 1000;
    ts.tv_nsec = (timeout % 1000) * 1000000;
    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        pthread_mutex_unlock(&qemu_mutex);

        r = sigtimedwait(&waitset, &siginfo, &ts);
        e = errno;

        pthread_mutex_lock(&qemu_mutex);

        if (r == -1 && !(e == EAGAIN || e == EINTR)) {
            printf("sigtimedwait: %s\n", strerror(e));
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            kvm_on_sigbus(env, &siginfo);
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            printf("sigpending: %s\n", strerror(e));
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));

    cpu_single_env = env;
    flush_queued_work(env);

    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;
        pthread_cond_signal(&qemu_pause_cond);
    }

    env->kvm_cpu_state.signalled = 0;
}
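/*
 * Note that qemu_mutex is dropped for the sigtimedwait() nap so the io
 * thread and other vcpus can make progress; the loop repeats until both
 * SIG_IPI and SIGBUS are drained from the pending set before queued work
 * and stop requests are processed under the reacquired lock.
 */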
static int all_threads_paused(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        if (penv->stop) {
            return 0;
        }
        penv = (CPUState *) penv->next_cpu;
    }

    return 1;
}

static void pause_all_threads(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        if (penv != cpu_single_env) {
            penv->stop = 1;
            pthread_kill(penv->kvm_cpu_state.thread, SIG_IPI);
        } else {
            penv->stop = 0;
            penv->stopped = 1;
            cpu_exit(penv);
        }
        penv = (CPUState *) penv->next_cpu;
    }

    while (!all_threads_paused()) {
        qemu_cond_wait(&qemu_pause_cond);
    }
}

static void resume_all_threads(void)
{
    CPUState *penv = first_cpu;

    assert(!cpu_single_env);

    while (penv) {
        penv->stop = 0;
        penv->stopped = 0;
        pthread_kill(penv->kvm_cpu_state.thread, SIG_IPI);
        penv = (CPUState *) penv->next_cpu;
    }
}

static void kvm_vm_state_change_handler(void *context, int running, int reason)
{
    if (running) {
        resume_all_threads();
    } else {
        pause_all_threads();
    }
}
static void setup_kernel_sigmask(CPUState *env)
{
    sigset_t set;

    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigprocmask(SIG_BLOCK, &set, NULL);

    sigprocmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);

    kvm_set_signal_mask(env, &set);
}
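/*
 * The computed mask blocks SIGUSR2/SIGIO/SIGALRM in this vcpu thread but
 * leaves SIG_IPI and SIGBUS deliverable during KVM_RUN via
 * kvm_set_signal_mask(): SIG_IPI must be able to kick the vcpu out of guest
 * mode, and a SIGBUS from a hardware memory error must reach the faulting
 * thread itself.
 */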
static void qemu_kvm_system_reset(void)
{
    pause_all_threads();

    qemu_system_reset();

    resume_all_threads();
}

static void process_irqchip_events(CPUState *env)
{
    kvm_arch_process_irqchip_events(env);
    if (kvm_arch_has_work(env))
        env->halted = 0;
}
static int kvm_main_loop_cpu(CPUState *env)
{
    while (1) {
        int run_cpu = !kvm_cpu_is_stopped(env);

        if (run_cpu && !kvm_irqchip_in_kernel()) {
            process_irqchip_events(env);
            run_cpu = !env->halted;
        }
        if (run_cpu) {
            kvm_cpu_exec(env);
            kvm_main_loop_wait(env, 0);
        } else {
            kvm_main_loop_wait(env, 1000);
        }
    }
    pthread_mutex_unlock(&qemu_mutex);
    return 0;
}
static void *ap_main_loop(void *_env)
{
    CPUState *env = _env;
    sigset_t signals;
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
    struct ioperm_data *data = NULL;
#endif

    current_env = env;
    env->thread_id = kvm_get_thread_id();
    sigfillset(&signals);
    sigprocmask(SIG_BLOCK, &signals, NULL);

#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
    /* do ioperm for io ports of assigned devices */
    QLIST_FOREACH(data, &ioperm_head, entries)
        on_vcpu(env, kvm_arch_do_ioperm, data);
#endif

    pthread_mutex_lock(&qemu_mutex);
    cpu_single_env = env;

    kvm_create_vcpu(env, env->cpu_index);
    setup_kernel_sigmask(env);

    /* signal VCPU creation */
    current_env->created = 1;
    pthread_cond_signal(&qemu_vcpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready) {
        qemu_cond_wait(&qemu_system_cond);
    }

    /* re-initialize cpu_single_env after re-acquiring qemu_mutex */
    cpu_single_env = env;

    kvm_main_loop_cpu(env);
    return NULL;
}
*env
)
1593 pthread_create(&env
->kvm_cpu_state
.thread
, NULL
, ap_main_loop
, env
);
1595 while (env
->created
== 0) {
1596 qemu_cond_wait(&qemu_vcpu_cond
);
1602 int kvm_vcpu_inited(CPUState
*env
)
1604 return env
->created
;
1608 void kvm_hpet_disable_kpit(void)
1610 struct kvm_pit_state2 ps2
;
1612 kvm_get_pit2(kvm_context
, &ps2
);
1613 ps2
.flags
|= KVM_PIT_FLAGS_HPET_LEGACY
;
1614 kvm_set_pit2(kvm_context
, &ps2
);
1617 void kvm_hpet_enable_kpit(void)
1619 struct kvm_pit_state2 ps2
;
1621 kvm_get_pit2(kvm_context
, &ps2
);
1622 ps2
.flags
&= ~KVM_PIT_FLAGS_HPET_LEGACY
;
1623 kvm_set_pit2(kvm_context
, &ps2
);
int kvm_init_ap(void)
{
    struct sigaction action;

    qemu_add_vm_change_state_handler(kvm_vm_state_change_handler, NULL);

    signal(SIG_IPI, sig_ipi_handler);

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t *, void *))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);
    prctl(PR_MCE_KILL, 1, 1, 0, 0);
    return 0;
}
/* If we have signalfd, we mask out the signals we want to handle and then
 * use signalfd to listen for them.  We rely on whatever the current signal
 * handler is to dispatch the signals when we receive them.
 */
static void sigfd_handler(void *opaque)
{
    int fd = (unsigned long) opaque;
    struct qemu_signalfd_siginfo info;
    struct sigaction action;
    ssize_t len;

    while (1) {
        do {
            len = read(fd, &info, sizeof(info));
        } while (len == -1 && errno == EINTR);

        if (len == -1 && errno == EAGAIN) {
            break;
        }

        if (len != sizeof(info)) {
            printf("read from sigfd returned %zd: %m\n", len);
            return;
        }

        sigaction(info.ssi_signo, NULL, &action);
        if ((action.sa_flags & SA_SIGINFO) && action.sa_sigaction) {
            action.sa_sigaction(info.ssi_signo,
                                (siginfo_t *)&info, NULL);
        } else if (action.sa_handler) {
            action.sa_handler(info.ssi_signo);
        }
    }
}
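/*
 * sigfd_handler() runs from the io thread's ordinary fd event loop, so the
 * signals blocked above are consumed as qemu_signalfd_siginfo records and
 * re-dispatched to whatever handler sigaction() reports as installed,
 * instead of running in true async-signal context.
 */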
int kvm_main_loop(void)
{
    sigset_t mask;
    int sigfd;

    io_thread = pthread_self();
    qemu_system_ready = 1;

    sigemptyset(&mask);
    sigaddset(&mask, SIGIO);
    sigaddset(&mask, SIGALRM);
    sigaddset(&mask, SIGBUS);
    sigprocmask(SIG_BLOCK, &mask, NULL);

    sigfd = qemu_signalfd(&mask);
    if (sigfd == -1) {
        fprintf(stderr, "failed to create signalfd\n");
        return -errno;
    }

    fcntl(sigfd, F_SETFL, O_NONBLOCK);

    qemu_set_fd_handler2(sigfd, NULL, sigfd_handler, NULL,
                         (void *)(unsigned long) sigfd);

    pthread_cond_broadcast(&qemu_system_cond);

    io_thread_sigfd = sigfd;
    cpu_single_env = NULL;

    while (1) {
        main_loop_wait(1000);
        if (qemu_shutdown_requested()) {
            monitor_protocol_event(QEVENT_SHUTDOWN, NULL);
            if (qemu_no_shutdown()) {
                vm_stop(0);
            } else {
                break;
            }
        } else if (qemu_powerdown_requested()) {
            monitor_protocol_event(QEVENT_POWERDOWN, NULL);
            qemu_irq_raise(qemu_system_powerdown);
        } else if (qemu_reset_requested()) {
            qemu_kvm_system_reset();
        } else if (kvm_debug_cpu_requested) {
            gdb_set_stop_cpu(kvm_debug_cpu_requested);
            vm_stop(EXCP_DEBUG);
            kvm_debug_cpu_requested = NULL;
        }
    }

    pause_all_threads();
    pthread_mutex_unlock(&qemu_mutex);

    return 0;
}
#if !defined(TARGET_I386)
int kvm_arch_init_irq_routing(void)
{
    return 0;
}
#endif
static int kvm_create_context(void)
{
    static const char upgrade_note[] =
        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
        "(see http://sourceforge.net/projects/kvm).\n";

    int r;

    if (!kvm_irqchip) {
        kvm_disable_irqchip_creation(kvm_context);
    }
    if (!kvm_pit) {
        kvm_disable_pit_creation(kvm_context);
    }
    if (kvm_create(kvm_context, 0, NULL) < 0) {
        kvm_finalize(kvm_state);
        return -1;
    }
    r = kvm_arch_qemu_create_context();
    if (r < 0) {
        kvm_finalize(kvm_state);
        return -1;
    }
    if (kvm_pit && !kvm_pit_reinject) {
        if (kvm_reinject_control(kvm_context, 0)) {
            fprintf(stderr, "failure to disable in-kernel PIT reinjection\n");
            return -1;
        }
    }

    /* There was a nasty bug in < kvm-80 that prevents memory slots from being
     * destroyed properly.  Since we rely on this capability, refuse to work
     * with any kernel without this capability. */
    if (!kvm_check_extension(kvm_state, KVM_CAP_DESTROY_MEMORY_REGION_WORKS)) {
        fprintf(stderr,
                "KVM kernel module broken (DESTROY_MEMORY_REGION).\n%s",
                upgrade_note);
        return -EINVAL;
    }

    r = kvm_arch_init_irq_routing();
    if (r < 0) {
        return r;
    }

    kvm_state->vcpu_events = 0;
#ifdef KVM_CAP_VCPU_EVENTS
    kvm_state->vcpu_events = kvm_check_extension(kvm_state, KVM_CAP_VCPU_EVENTS);
#endif

    kvm_state->debugregs = 0;
#ifdef KVM_CAP_DEBUGREGS
    kvm_state->debugregs = kvm_check_extension(kvm_state, KVM_CAP_DEBUGREGS);
#endif

    kvm_init_ap();
    if (kvm_irqchip) {
        if (!qemu_kvm_has_gsi_routing()) {
            irq0override = 0;
#ifdef TARGET_I386
            /* If the kernel can't do irq routing, interrupt source
             * override 0->2 cannot be set up as required by HPET,
             * so disable HPET. */
            no_hpet = 1;
        } else if (!qemu_kvm_has_pit_state2()) {
            no_hpet = 1;
#endif
        }
    }

    return 0;
}
#ifdef KVM_CAP_IRQCHIP

int kvm_set_irq(int irq, int level, int *status)
{
    return kvm_set_irq_level(kvm_context, irq, level, status);
}

#endif

static void kvm_mutex_unlock(void)
{
    assert(!cpu_single_env);
    pthread_mutex_unlock(&qemu_mutex);
}

static void kvm_mutex_lock(void)
{
    pthread_mutex_lock(&qemu_mutex);
    cpu_single_env = NULL;
}

void qemu_mutex_unlock_iothread(void)
{
    if (kvm_enabled()) {
        kvm_mutex_unlock();
    }
}

void qemu_mutex_lock_iothread(void)
{
    if (kvm_enabled()) {
        kvm_mutex_lock();
    }
}
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
void kvm_add_ioperm_data(struct ioperm_data *data)
{
    QLIST_INSERT_HEAD(&ioperm_head, data, entries);
}

void kvm_remove_ioperm_data(unsigned long start_port, unsigned long num)
{
    struct ioperm_data *data;

    data = QLIST_FIRST(&ioperm_head);
    while (data) {
        struct ioperm_data *next = QLIST_NEXT(data, entries);

        if (data->start_port == start_port && data->num == num) {
            QLIST_REMOVE(data, entries);
            qemu_free(data);
        }

        data = next;
    }
}

void kvm_ioperm(CPUState *env, void *data)
{
    if (kvm_enabled() && qemu_system_ready) {
        on_vcpu(env, kvm_arch_do_ioperm, data);
    }
}
#endif
int kvm_set_boot_cpu_id(uint32_t id)
{
    return kvm_set_boot_vcpu_id(kvm_context, id);
}

#ifdef TARGET_I386
#ifdef KVM_CAP_MCE
struct kvm_x86_mce_data {
    CPUState *env;
    struct kvm_x86_mce *mce;
    int abort_on_error;
};
static void kvm_do_inject_x86_mce(void *_data)
{
    struct kvm_x86_mce_data *data = _data;
    int r;

    /* If there is an MCE exception being processed, ignore this SRAO MCE */
    r = kvm_mce_in_exception(data->env);
    if (r == -1) {
        fprintf(stderr, "Failed to get MCE status\n");
    } else if (r && !(data->mce->status & MCI_STATUS_AR)) {
        return;
    }

    r = kvm_set_mce(data->env, data->mce);
    if (r < 0) {
        perror("kvm_set_mce FAILED");
        if (data->abort_on_error) {
            abort();
        }
    }
}
#endif /* KVM_CAP_MCE */
void kvm_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc,
                        int abort_on_error)
{
#ifdef KVM_CAP_MCE
    struct kvm_x86_mce mce = {
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
    };
    struct kvm_x86_mce_data data = {
        .env = cenv,
        .mce = &mce,
        .abort_on_error = abort_on_error,
    };

    if (!cenv->mcg_cap) {
        fprintf(stderr, "MCE support is not enabled!\n");
        return;
    }
    on_vcpu(cenv, kvm_do_inject_x86_mce, &data);
#else
    if (abort_on_error) {
        abort();
    }
#endif
}
#endif /* TARGET_I386 */
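/*
 * Usage: the SIGBUS paths above end up here.  An action-required fault
 * (BUS_MCEERR_AR) is injected into the faulting vcpu as an SRAR machine
 * check, while an action-optional scrub error (BUS_MCEERR_AO) is broadcast
 * with status bits faking an Intel memory-scrubbing UCR in bank 9.
 */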