[qemu-kvm/markmc.git] / qemu-kvm.c
1 /*
2 * qemu/kvm integration
4 * Copyright (C) 2006-2008 Qumranet Technologies
6 * Licensed under the terms of the GNU GPL version 2 or higher.
7 */
8 #include "config.h"
9 #include "config-host.h"
11 #include <assert.h>
12 #include <string.h>
13 #include "hw/hw.h"
14 #include "sysemu.h"
15 #include "qemu-common.h"
16 #include "console.h"
17 #include "block.h"
18 #include "compatfd.h"
19 #include "gdbstub.h"
21 #include "qemu-kvm.h"
22 #include "libkvm.h"
24 #include <pthread.h>
25 #include <sys/utsname.h>
26 #include <sys/syscall.h>
27 #include <sys/mman.h>
28 #include <sys/ioctl.h>
29 #include "compatfd.h"
30 #include <sys/prctl.h>
32 #define false 0
33 #define true 1
35 #ifndef PR_MCE_KILL
36 #define PR_MCE_KILL 33
37 #endif
39 #ifndef BUS_MCEERR_AR
40 #define BUS_MCEERR_AR 4
41 #endif
42 #ifndef BUS_MCEERR_AO
43 #define BUS_MCEERR_AO 5
44 #endif
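/*
 * Note: BUS_MCEERR_AR ("action required") is raised synchronously when a
 * poisoned page is actually consumed, while BUS_MCEERR_AO ("action optional")
 * is an asynchronous notification, e.g. from background memory scrubbing.
 * The fallback values above match the kernel's siginfo definitions.
 */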
46 #define EXPECTED_KVM_API_VERSION 12
48 #if EXPECTED_KVM_API_VERSION != KVM_API_VERSION
49 #error libkvm: userspace and kernel version mismatch
50 #endif
52 int kvm_allowed = 1;
53 int kvm_irqchip = 1;
54 int kvm_pit = 1;
55 int kvm_pit_reinject = 1;
56 int kvm_nested = 0;
59 KVMState *kvm_state;
60 kvm_context_t kvm_context;
62 pthread_mutex_t qemu_mutex = PTHREAD_MUTEX_INITIALIZER;
63 pthread_cond_t qemu_vcpu_cond = PTHREAD_COND_INITIALIZER;
64 pthread_cond_t qemu_system_cond = PTHREAD_COND_INITIALIZER;
65 pthread_cond_t qemu_pause_cond = PTHREAD_COND_INITIALIZER;
66 pthread_cond_t qemu_work_cond = PTHREAD_COND_INITIALIZER;
67 __thread CPUState *current_env;
69 static int qemu_system_ready;
71 #define SIG_IPI (SIGRTMIN+4)
73 pthread_t io_thread;
74 static int io_thread_fd = -1;
75 static int io_thread_sigfd = -1;
77 static CPUState *kvm_debug_cpu_requested;
79 static uint64_t phys_ram_size;
81 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
82 /* The list of ioperm_data */
83 static QLIST_HEAD(, ioperm_data) ioperm_head;
84 #endif
86 //#define DEBUG_MEMREG
87 #ifdef DEBUG_MEMREG
88 #define DPRINTF(fmt, args...) \
89 do { fprintf(stderr, "%s:%d " fmt , __func__, __LINE__, ##args); } while (0)
90 #else
91 #define DPRINTF(fmt, args...) do {} while (0)
92 #endif
94 #define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))
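/* Example: with y a power of two, ALIGN(33, 32) == 64 and ALIGN(64, 32) == 64. */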
96 int kvm_abi = EXPECTED_KVM_API_VERSION;
97 int kvm_page_size;
99 #ifdef KVM_CAP_SET_GUEST_DEBUG
100 static int kvm_debug(CPUState *env,
101 struct kvm_debug_exit_arch *arch_info)
103 int handle = kvm_arch_debug(arch_info);
105 if (handle) {
106 kvm_debug_cpu_requested = env;
107 env->stopped = 1;
109 return handle;
111 #endif
113 static int handle_unhandled(uint64_t reason)
115 fprintf(stderr, "kvm: unhandled exit %" PRIx64 "\n", reason);
116 return -EINVAL;
120 static inline void set_gsi(kvm_context_t kvm, unsigned int gsi)
122 uint32_t *bitmap = kvm->used_gsi_bitmap;
124 if (gsi < kvm->max_gsi)
125 bitmap[gsi / 32] |= 1U << (gsi % 32);
126 else
127 DPRINTF("Invalid GSI %d\n", gsi);
130 static inline void clear_gsi(kvm_context_t kvm, unsigned int gsi)
132 uint32_t *bitmap = kvm->used_gsi_bitmap;
134 if (gsi < kvm->max_gsi)
135 bitmap[gsi / 32] &= ~(1U << (gsi % 32));
136 else
137 DPRINTF("Invalid GSI %d\n", gsi);
140 struct slot_info {
141 unsigned long phys_addr;
142 unsigned long len;
143 unsigned long userspace_addr;
144 unsigned flags;
145 int logging_count;
146 };
148 struct slot_info slots[KVM_MAX_NUM_MEM_REGIONS];
150 static void init_slots(void)
152 int i;
154 for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS; ++i)
155 slots[i].len = 0;
158 static int get_free_slot(kvm_context_t kvm)
160 int i;
161 int tss_ext;
163 #if defined(KVM_CAP_SET_TSS_ADDR) && !defined(__s390__)
164 tss_ext = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
165 #else
166 tss_ext = 0;
167 #endif
169 /*
170 * on older kernels where the set tss ioctl is not supported we must save
171 * slot 0 to hold the extended memory, as the vmx will use the last 3
172 * pages of this slot.
173 */
174 if (tss_ext > 0)
175 i = 0;
176 else
177 i = 1;
179 for (; i < KVM_MAX_NUM_MEM_REGIONS; ++i)
180 if (!slots[i].len)
181 return i;
182 return -1;
185 static void register_slot(int slot, unsigned long phys_addr,
186 unsigned long len, unsigned long userspace_addr,
187 unsigned flags)
189 slots[slot].phys_addr = phys_addr;
190 slots[slot].len = len;
191 slots[slot].userspace_addr = userspace_addr;
192 slots[slot].flags = flags;
195 static void free_slot(int slot)
197 slots[slot].len = 0;
198 slots[slot].logging_count = 0;
201 static int get_slot(unsigned long phys_addr)
203 int i;
205 for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS; ++i) {
206 if (slots[i].len && slots[i].phys_addr <= phys_addr &&
207 (slots[i].phys_addr + slots[i].len - 1) >= phys_addr)
208 return i;
210 return -1;
213 /* Returns -1 if this slot is not totally contained in any other slot,
214 * and the number of the containing slot otherwise */
215 static int get_container_slot(uint64_t phys_addr, unsigned long size)
217 int i;
219 for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS; ++i)
220 if (slots[i].len && slots[i].phys_addr <= phys_addr &&
221 (slots[i].phys_addr + slots[i].len) >= phys_addr + size)
222 return i;
223 return -1;
226 int kvm_is_containing_region(kvm_context_t kvm, unsigned long phys_addr,
227 unsigned long size)
229 int slot = get_container_slot(phys_addr, size);
230 if (slot == -1)
231 return 0;
232 return 1;
235 /*
236 * dirty pages logging control
237 */
238 static int kvm_dirty_pages_log_change(kvm_context_t kvm,
239 unsigned long phys_addr, unsigned flags,
240 unsigned mask)
242 int r = -1;
243 int slot = get_slot(phys_addr);
245 if (slot == -1) {
246 fprintf(stderr, "BUG: %s: invalid parameters\n", __FUNCTION__);
247 return 1;
250 flags = (slots[slot].flags & ~mask) | flags;
251 if (flags == slots[slot].flags)
252 return 0;
253 slots[slot].flags = flags;
256 struct kvm_userspace_memory_region mem = {
257 .slot = slot,
258 .memory_size = slots[slot].len,
259 .guest_phys_addr = slots[slot].phys_addr,
260 .userspace_addr = slots[slot].userspace_addr,
261 .flags = slots[slot].flags,
262 };
265 DPRINTF("slot %d start %llx len %llx flags %x\n",
266 mem.slot, mem.guest_phys_addr, mem.memory_size, mem.flags);
267 r = kvm_vm_ioctl(kvm_state, KVM_SET_USER_MEMORY_REGION, &mem);
268 if (r < 0)
269 fprintf(stderr, "%s: %m\n", __FUNCTION__);
271 return r;
274 static int kvm_dirty_pages_log_change_all(kvm_context_t kvm,
275 int (*change)(kvm_context_t kvm,
276 uint64_t start,
277 uint64_t len))
279 int i, r;
281 for (i = r = 0; i < KVM_MAX_NUM_MEM_REGIONS && r == 0; i++) {
282 if (slots[i].len)
283 r = change(kvm, slots[i].phys_addr, slots[i].len);
285 return r;
288 int kvm_dirty_pages_log_enable_slot(kvm_context_t kvm, uint64_t phys_addr,
289 uint64_t len)
291 int slot = get_slot(phys_addr);
293 DPRINTF("start %" PRIx64 " len %" PRIx64 "\n", phys_addr, len);
294 if (slot == -1) {
295 fprintf(stderr, "BUG: %s: invalid parameters\n", __func__);
296 return -EINVAL;
299 if (slots[slot].logging_count++)
300 return 0;
302 return kvm_dirty_pages_log_change(kvm, slots[slot].phys_addr,
303 KVM_MEM_LOG_DIRTY_PAGES,
304 KVM_MEM_LOG_DIRTY_PAGES);
307 int kvm_dirty_pages_log_disable_slot(kvm_context_t kvm, uint64_t phys_addr,
308 uint64_t len)
310 int slot = get_slot(phys_addr);
312 if (slot == -1) {
313 fprintf(stderr, "BUG: %s: invalid parameters\n", __func__);
314 return -EINVAL;
317 if (--slots[slot].logging_count)
318 return 0;
320 return kvm_dirty_pages_log_change(kvm, slots[slot].phys_addr, 0,
321 KVM_MEM_LOG_DIRTY_PAGES);
324 /*
325 * Enable dirty page logging for all memory regions
326 */
327 int kvm_dirty_pages_log_enable_all(kvm_context_t kvm)
329 if (kvm->dirty_pages_log_all)
330 return 0;
331 kvm->dirty_pages_log_all = 1;
332 return kvm_dirty_pages_log_change_all(kvm, kvm_dirty_pages_log_enable_slot);
335 /*
336 * Enable dirty page logging only for memory regions that were created with
337 * dirty logging enabled (disable for all other memory regions).
338 */
339 int kvm_dirty_pages_log_reset(kvm_context_t kvm)
341 if (!kvm->dirty_pages_log_all)
342 return 0;
343 kvm->dirty_pages_log_all = 0;
344 return kvm_dirty_pages_log_change_all(kvm,
345 kvm_dirty_pages_log_disable_slot);
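/*
 * Illustrative sketch (disabled): one way a caller might drive the dirty
 * logging helpers above for a migration-style pass.  The function names and
 * the popcount-based accounting are invented for illustration; the
 * kvm_dirty_pages_log_* / kvm_get_dirty_pages_range calls and BITMAP_SIZE
 * are the ones declared for this file (qemu-kvm.h / libkvm.h).
 */
#if 0
static int count_dirty_cb(unsigned long start, unsigned long len,
                          void *bitmap, void *opaque)
{
    unsigned long *count = opaque;
    unsigned char *map = bitmap;
    unsigned long i;

    for (i = 0; i < BITMAP_SIZE(len); i++)
        *count += __builtin_popcount(map[i]);
    return 0;
}

static void example_dirty_log_pass(void)
{
    unsigned long dirty_pages = 0;

    kvm_dirty_pages_log_enable_all(kvm_context);    /* turn logging on       */
    /* ... let the guest run for a while ... */
    kvm_get_dirty_pages_range(kvm_context, 0, -1UL, /* fetch and clear log   */
                              &dirty_pages, count_dirty_cb);
    kvm_dirty_pages_log_reset(kvm_context);         /* back to per-slot mode */
}
#endif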
349 static int kvm_create_context(void);
351 int kvm_init(int smp_cpus)
353 int fd;
354 int r, gsi_count;
357 fd = open("/dev/kvm", O_RDWR);
358 if (fd == -1) {
359 perror("open /dev/kvm");
360 return -1;
362 r = ioctl(fd, KVM_GET_API_VERSION, 0);
363 if (r == -1) {
364 fprintf(stderr,
365 "kvm kernel version too old: "
366 "KVM_GET_API_VERSION ioctl not supported\n");
367 goto out_close;
369 if (r < EXPECTED_KVM_API_VERSION) {
370 fprintf(stderr, "kvm kernel version too old: "
371 "We expect API version %d or newer, but got "
372 "version %d\n", EXPECTED_KVM_API_VERSION, r);
373 goto out_close;
375 if (r > EXPECTED_KVM_API_VERSION) {
376 fprintf(stderr, "kvm userspace version too old\n");
377 goto out_close;
379 kvm_abi = r;
380 kvm_page_size = getpagesize();
381 kvm_state = qemu_mallocz(sizeof(*kvm_state));
382 kvm_context = &kvm_state->kvm_context;
384 kvm_state->fd = fd;
385 kvm_state->vmfd = -1;
386 kvm_context->opaque = cpu_single_env;
387 kvm_context->dirty_pages_log_all = 0;
388 kvm_context->no_irqchip_creation = 0;
389 kvm_context->no_pit_creation = 0;
391 #ifdef KVM_CAP_SET_GUEST_DEBUG
392 QTAILQ_INIT(&kvm_state->kvm_sw_breakpoints);
393 #endif
395 gsi_count = kvm_get_gsi_count(kvm_context);
396 if (gsi_count > 0) {
397 int gsi_bits, i;
399 /* Round up so we can search ints using ffs */
400 gsi_bits = ALIGN(gsi_count, 32);
401 kvm_context->used_gsi_bitmap = qemu_mallocz(gsi_bits / 8);
402 kvm_context->max_gsi = gsi_bits;
404 /* Mark any over-allocated bits as already in use */
405 for (i = gsi_count; i < gsi_bits; i++)
406 set_gsi(kvm_context, i);
409 pthread_mutex_lock(&qemu_mutex);
410 return kvm_create_context();
412 out_close:
413 close(fd);
414 return -1;
417 static void kvm_finalize(KVMState *s)
419 /* FIXME
420 if (kvm->vcpu_fd[0] != -1)
421 close(kvm->vcpu_fd[0]);
422 if (kvm->vm_fd != -1)
423 close(kvm->vm_fd);
424 */
425 close(s->fd);
426 free(s);
429 void kvm_disable_irqchip_creation(kvm_context_t kvm)
431 kvm->no_irqchip_creation = 1;
434 void kvm_disable_pit_creation(kvm_context_t kvm)
436 kvm->no_pit_creation = 1;
439 static void kvm_create_vcpu(CPUState *env, int id)
441 long mmap_size;
442 int r;
444 r = kvm_vm_ioctl(kvm_state, KVM_CREATE_VCPU, id);
445 if (r < 0) {
446 fprintf(stderr, "kvm_create_vcpu: %m\n");
447 return;
450 env->kvm_fd = r;
451 env->kvm_state = kvm_state;
453 mmap_size = kvm_ioctl(kvm_state, KVM_GET_VCPU_MMAP_SIZE, 0);
454 if (mmap_size < 0) {
455 fprintf(stderr, "get vcpu mmap size: %m\n");
456 goto err_fd;
458 env->kvm_run =
459 mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, env->kvm_fd,
460 0);
461 if (env->kvm_run == MAP_FAILED) {
462 fprintf(stderr, "mmap vcpu area: %m\n");
463 goto err_fd;
466 return;
467 err_fd:
468 close(env->kvm_fd);
471 static int kvm_set_boot_vcpu_id(kvm_context_t kvm, uint32_t id)
473 #ifdef KVM_CAP_SET_BOOT_CPU_ID
474 int r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_SET_BOOT_CPU_ID);
475 if (r > 0)
476 return kvm_vm_ioctl(kvm_state, KVM_SET_BOOT_CPU_ID, id);
477 return -ENOSYS;
478 #else
479 return -ENOSYS;
480 #endif
483 int kvm_create_vm(kvm_context_t kvm)
485 int fd;
486 #ifdef KVM_CAP_IRQ_ROUTING
487 kvm->irq_routes = qemu_mallocz(sizeof(*kvm->irq_routes));
488 kvm->nr_allocated_irq_routes = 0;
489 #endif
491 fd = kvm_ioctl(kvm_state, KVM_CREATE_VM, 0);
492 if (fd < 0) {
493 fprintf(stderr, "kvm_create_vm: %m\n");
494 return -1;
496 kvm_state->vmfd = fd;
497 return 0;
500 static int kvm_create_default_phys_mem(kvm_context_t kvm,
501 unsigned long phys_mem_bytes,
502 void **vm_mem)
504 #ifdef KVM_CAP_USER_MEMORY
505 int r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY);
506 if (r > 0)
507 return 0;
508 fprintf(stderr,
509 "Hypervisor too old: KVM_CAP_USER_MEMORY extension not supported\n");
510 #else
511 #error Hypervisor too old: KVM_CAP_USER_MEMORY extension not supported
512 #endif
513 return -1;
516 void kvm_create_irqchip(kvm_context_t kvm)
518 int r;
520 kvm->irqchip_in_kernel = 0;
521 #ifdef KVM_CAP_IRQCHIP
522 if (!kvm->no_irqchip_creation) {
523 r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_IRQCHIP);
524 if (r > 0) { /* kernel irqchip supported */
525 r = kvm_vm_ioctl(kvm_state, KVM_CREATE_IRQCHIP);
526 if (r >= 0) {
527 kvm->irqchip_inject_ioctl = KVM_IRQ_LINE;
528 #if defined(KVM_CAP_IRQ_INJECT_STATUS) && defined(KVM_IRQ_LINE_STATUS)
529 r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION,
530 KVM_CAP_IRQ_INJECT_STATUS);
531 if (r > 0)
532 kvm->irqchip_inject_ioctl = KVM_IRQ_LINE_STATUS;
533 #endif
534 kvm->irqchip_in_kernel = 1;
535 } else
536 fprintf(stderr, "Create kernel PIC irqchip failed\n");
539 #endif
540 kvm_state->irqchip_in_kernel = kvm->irqchip_in_kernel;
543 int kvm_create(kvm_context_t kvm, unsigned long phys_mem_bytes, void **vm_mem)
545 int r;
547 r = kvm_create_vm(kvm);
548 if (r < 0)
549 return r;
550 r = kvm_arch_create(kvm, phys_mem_bytes, vm_mem);
551 if (r < 0)
552 return r;
553 init_slots();
554 r = kvm_create_default_phys_mem(kvm, phys_mem_bytes, vm_mem);
555 if (r < 0)
556 return r;
557 kvm_create_irqchip(kvm);
559 return 0;
563 int kvm_register_phys_mem(kvm_context_t kvm,
564 unsigned long phys_start, void *userspace_addr,
565 unsigned long len, int log)
568 struct kvm_userspace_memory_region memory = {
569 .memory_size = len,
570 .guest_phys_addr = phys_start,
571 .userspace_addr = (unsigned long) (uintptr_t) userspace_addr,
572 .flags = log ? KVM_MEM_LOG_DIRTY_PAGES : 0,
573 };
574 int r;
576 memory.slot = get_free_slot(kvm);
577 DPRINTF
578 ("memory: gpa: %llx, size: %llx, uaddr: %llx, slot: %x, flags: %lx\n",
579 memory.guest_phys_addr, memory.memory_size, memory.userspace_addr,
580 memory.slot, memory.flags);
581 r = kvm_vm_ioctl(kvm_state, KVM_SET_USER_MEMORY_REGION, &memory);
582 if (r < 0) {
583 fprintf(stderr, "create_userspace_phys_mem: %s\n", strerror(-r));
584 return -1;
586 register_slot(memory.slot, memory.guest_phys_addr, memory.memory_size,
587 memory.userspace_addr, memory.flags);
588 return 0;
592 /* destroy/free a whole slot.
593 * phys_start, len and slot are the params passed to kvm_create_phys_mem()
594 */
595 void kvm_destroy_phys_mem(kvm_context_t kvm, unsigned long phys_start,
596 unsigned long len)
598 int slot;
599 int r;
600 struct kvm_userspace_memory_region memory = {
601 .memory_size = 0,
602 .guest_phys_addr = phys_start,
603 .userspace_addr = 0,
604 .flags = 0,
605 };
607 slot = get_slot(phys_start);
609 if ((slot >= KVM_MAX_NUM_MEM_REGIONS) || (slot == -1)) {
610 fprintf(stderr, "BUG: %s: invalid parameters (slot=%d)\n", __FUNCTION__,
611 slot);
612 return;
614 if (phys_start != slots[slot].phys_addr) {
615 fprintf(stderr,
616 "WARNING: %s: phys_start is 0x%lx expecting 0x%lx\n",
617 __FUNCTION__, phys_start, slots[slot].phys_addr);
618 phys_start = slots[slot].phys_addr;
621 memory.slot = slot;
622 DPRINTF("slot %d start %llx len %llx flags %x\n",
623 memory.slot, memory.guest_phys_addr, memory.memory_size,
624 memory.flags);
625 r = kvm_vm_ioctl(kvm_state, KVM_SET_USER_MEMORY_REGION, &memory);
626 if (r < 0) {
627 fprintf(stderr, "destroy_userspace_phys_mem: %s", strerror(-r));
628 return;
631 free_slot(memory.slot);
634 void kvm_unregister_memory_area(kvm_context_t kvm, uint64_t phys_addr,
635 unsigned long size)
638 int slot = get_container_slot(phys_addr, size);
640 if (slot != -1) {
641 DPRINTF("Unregistering memory region %llx (%lx)\n", phys_addr, size);
642 kvm_destroy_phys_mem(kvm, phys_addr, size);
643 return;
647 static int kvm_get_map(kvm_context_t kvm, int ioctl_num, int slot, void *buf)
649 int r;
650 struct kvm_dirty_log log = {
651 .slot = slot,
652 };
654 log.dirty_bitmap = buf;
656 r = kvm_vm_ioctl(kvm_state, ioctl_num, &log);
657 if (r < 0)
658 return r;
659 return 0;
662 int kvm_get_dirty_pages(kvm_context_t kvm, unsigned long phys_addr, void *buf)
664 int slot;
666 slot = get_slot(phys_addr);
667 return kvm_get_map(kvm, KVM_GET_DIRTY_LOG, slot, buf);
670 int kvm_get_dirty_pages_range(kvm_context_t kvm, unsigned long phys_addr,
671 unsigned long len, void *opaque,
672 int (*cb)(unsigned long start,
673 unsigned long len, void *bitmap,
674 void *opaque))
676 int i;
677 int r;
678 unsigned long end_addr = phys_addr + len;
679 void *buf;
681 for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS; ++i) {
682 if ((slots[i].len && (uint64_t) slots[i].phys_addr >= phys_addr)
683 && ((uint64_t) slots[i].phys_addr + slots[i].len <= end_addr)) {
684 buf = qemu_malloc(BITMAP_SIZE(slots[i].len));
685 r = kvm_get_map(kvm, KVM_GET_DIRTY_LOG, i, buf);
686 if (r) {
687 qemu_free(buf);
688 return r;
690 r = cb(slots[i].phys_addr, slots[i].len, buf, opaque);
691 qemu_free(buf);
692 if (r)
693 return r;
696 return 0;
699 #ifdef KVM_CAP_IRQCHIP
701 int kvm_set_irq_level(kvm_context_t kvm, int irq, int level, int *status)
703 struct kvm_irq_level event;
704 int r;
706 if (!kvm->irqchip_in_kernel)
707 return 0;
708 event.level = level;
709 event.irq = irq;
710 r = kvm_vm_ioctl(kvm_state, kvm->irqchip_inject_ioctl, &event);
711 if (r < 0)
712 perror("kvm_set_irq_level");
714 if (status) {
715 #ifdef KVM_CAP_IRQ_INJECT_STATUS
716 *status =
717 (kvm->irqchip_inject_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
718 #else
719 *status = 1;
720 #endif
723 return 1;
726 int kvm_get_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip)
728 int r;
730 if (!kvm->irqchip_in_kernel)
731 return 0;
732 r = kvm_vm_ioctl(kvm_state, KVM_GET_IRQCHIP, chip);
733 if (r < 0) {
734 perror("kvm_get_irqchip");
736 return r;
739 int kvm_set_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip)
741 int r;
743 if (!kvm->irqchip_in_kernel)
744 return 0;
745 r = kvm_vm_ioctl(kvm_state, KVM_SET_IRQCHIP, chip);
746 if (r < 0) {
747 perror("kvm_set_irqchip");
749 return r;
752 #endif
754 static int handle_debug(CPUState *env)
756 #ifdef KVM_CAP_SET_GUEST_DEBUG
757 struct kvm_run *run = env->kvm_run;
759 return kvm_debug(env, &run->debug.arch);
760 #else
761 return 0;
762 #endif
765 int kvm_get_regs(CPUState *env, struct kvm_regs *regs)
767 return kvm_vcpu_ioctl(env, KVM_GET_REGS, regs);
770 int kvm_set_regs(CPUState *env, struct kvm_regs *regs)
772 return kvm_vcpu_ioctl(env, KVM_SET_REGS, regs);
775 int kvm_get_fpu(CPUState *env, struct kvm_fpu *fpu)
777 return kvm_vcpu_ioctl(env, KVM_GET_FPU, fpu);
780 int kvm_set_fpu(CPUState *env, struct kvm_fpu *fpu)
782 return kvm_vcpu_ioctl(env, KVM_SET_FPU, fpu);
785 int kvm_get_sregs(CPUState *env, struct kvm_sregs *sregs)
787 return kvm_vcpu_ioctl(env, KVM_GET_SREGS, sregs);
790 int kvm_set_sregs(CPUState *env, struct kvm_sregs *sregs)
792 return kvm_vcpu_ioctl(env, KVM_SET_SREGS, sregs);
795 #ifdef KVM_CAP_MP_STATE
796 int kvm_get_mpstate(CPUState *env, struct kvm_mp_state *mp_state)
798 int r;
800 r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_MP_STATE);
801 if (r > 0)
802 return kvm_vcpu_ioctl(env, KVM_GET_MP_STATE, mp_state);
803 return -ENOSYS;
806 int kvm_set_mpstate(CPUState *env, struct kvm_mp_state *mp_state)
808 int r;
810 r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_MP_STATE);
811 if (r > 0)
812 return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, mp_state);
813 return -ENOSYS;
815 #endif
817 static int handle_mmio(CPUState *env)
819 unsigned long addr = env->kvm_run->mmio.phys_addr;
820 struct kvm_run *kvm_run = env->kvm_run;
821 void *data = kvm_run->mmio.data;
823 /* hack: Red Hat 7.1 generates these weird accesses. */
824 if ((addr > 0xa0000 - 4 && addr <= 0xa0000) && kvm_run->mmio.len == 3)
825 return 0;
827 cpu_physical_memory_rw(addr, data, kvm_run->mmio.len, kvm_run->mmio.is_write);
828 return 0;
831 int handle_io_window(kvm_context_t kvm)
833 return 1;
836 int handle_shutdown(kvm_context_t kvm, CPUState *env)
838 /* stop the current vcpu from going back to guest mode */
839 env->stopped = 1;
841 qemu_system_reset_request();
842 return 1;
845 static inline void push_nmi(kvm_context_t kvm)
847 #ifdef KVM_CAP_USER_NMI
848 kvm_arch_push_nmi(kvm->opaque);
849 #endif /* KVM_CAP_USER_NMI */
852 void post_kvm_run(kvm_context_t kvm, CPUState *env)
854 pthread_mutex_lock(&qemu_mutex);
855 kvm_arch_post_run(env, env->kvm_run);
856 cpu_single_env = env;
859 int pre_kvm_run(kvm_context_t kvm, CPUState *env)
861 kvm_arch_pre_run(env, env->kvm_run);
863 pthread_mutex_unlock(&qemu_mutex);
864 return 0;
867 int kvm_is_ready_for_interrupt_injection(CPUState *env)
869 return env->kvm_run->ready_for_interrupt_injection;
872 int kvm_run(CPUState *env)
874 int r;
875 kvm_context_t kvm = &env->kvm_state->kvm_context;
876 struct kvm_run *run = env->kvm_run;
877 int fd = env->kvm_fd;
879 again:
880 push_nmi(kvm);
881 #if !defined(__s390__)
882 if (!kvm->irqchip_in_kernel)
883 run->request_interrupt_window = kvm_arch_try_push_interrupts(env);
884 #endif
886 if (env->kvm_cpu_state.regs_modified) {
887 kvm_arch_put_registers(env);
888 env->kvm_cpu_state.regs_modified = 0;
891 r = pre_kvm_run(kvm, env);
892 if (r)
893 return r;
894 r = ioctl(fd, KVM_RUN, 0);
896 if (r == -1 && errno != EINTR && errno != EAGAIN) {
897 r = -errno;
898 post_kvm_run(kvm, env);
899 fprintf(stderr, "kvm_run: %s\n", strerror(-r));
900 return r;
903 post_kvm_run(kvm, env);
905 #if defined(KVM_CAP_COALESCED_MMIO)
906 if (kvm_state->coalesced_mmio) {
907 struct kvm_coalesced_mmio_ring *ring =
908 (void *) run + kvm_state->coalesced_mmio * PAGE_SIZE;
909 while (ring->first != ring->last) {
910 cpu_physical_memory_rw(ring->coalesced_mmio[ring->first].phys_addr,
911 &ring->coalesced_mmio[ring->first].data[0],
912 ring->coalesced_mmio[ring->first].len, 1);
913 smp_wmb();
914 ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
917 #endif
919 #if !defined(__s390__)
920 if (r == -1) {
921 r = handle_io_window(kvm);
922 goto more;
924 #endif
925 if (1) {
926 switch (run->exit_reason) {
927 case KVM_EXIT_UNKNOWN:
928 r = handle_unhandled(run->hw.hardware_exit_reason);
929 break;
930 case KVM_EXIT_FAIL_ENTRY:
931 r = handle_unhandled(run->fail_entry.hardware_entry_failure_reason);
932 break;
933 case KVM_EXIT_EXCEPTION:
934 fprintf(stderr, "exception %d (%x)\n", run->ex.exception,
935 run->ex.error_code);
936 kvm_show_regs(env);
937 kvm_show_code(env);
938 abort();
939 break;
940 case KVM_EXIT_IO:
941 r = kvm_handle_io(run->io.port,
942 (uint8_t *)run + run->io.data_offset,
943 run->io.direction,
944 run->io.size,
945 run->io.count);
946 break;
947 case KVM_EXIT_DEBUG:
948 r = handle_debug(env);
949 break;
950 case KVM_EXIT_MMIO:
951 r = handle_mmio(env);
952 break;
953 case KVM_EXIT_HLT:
954 r = kvm_arch_halt(env);
955 break;
956 case KVM_EXIT_IRQ_WINDOW_OPEN:
957 break;
958 case KVM_EXIT_SHUTDOWN:
959 r = handle_shutdown(kvm, env);
960 break;
961 #if defined(__s390__)
962 case KVM_EXIT_S390_SIEIC:
963 r = kvm_s390_handle_intercept(kvm, env, run);
964 break;
965 case KVM_EXIT_S390_RESET:
966 r = kvm_s390_handle_reset(kvm, env, run);
967 break;
968 #endif
969 case KVM_EXIT_INTERNAL_ERROR:
970 fprintf(stderr, "KVM internal error. Suberror: %d\n",
971 run->internal.suberror);
972 kvm_show_regs(env);
973 if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION)
974 fprintf(stderr, "emulation failure, check dmesg for details\n");
975 abort();
976 break;
977 default:
978 if (kvm_arch_run(env)) {
979 fprintf(stderr, "unhandled vm exit: 0x%x\n", run->exit_reason);
980 kvm_show_regs(env);
981 abort();
983 break;
986 more:
987 if (!r)
988 goto again;
989 return r;
992 int kvm_inject_irq(CPUState *env, unsigned irq)
994 struct kvm_interrupt intr;
996 intr.irq = irq;
997 return kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
1000 #ifdef KVM_CAP_SET_GUEST_DEBUG
1001 int kvm_set_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
1003 return kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, dbg);
1005 #endif
1007 int kvm_set_signal_mask(CPUState *env, const sigset_t *sigset)
1009 struct kvm_signal_mask *sigmask;
1010 int r;
1012 if (!sigset) {
1013 return kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, NULL);
1015 sigmask = qemu_malloc(sizeof(*sigmask) + sizeof(*sigset));
1017 sigmask->len = 8;
1018 memcpy(sigmask->sigset, sigset, sizeof(*sigset));
1019 r = kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, sigmask);
1020 free(sigmask);
1021 return r;
1024 int kvm_pit_in_kernel(kvm_context_t kvm)
1026 return kvm->pit_in_kernel;
1029 int kvm_inject_nmi(CPUState *env)
1031 #ifdef KVM_CAP_USER_NMI
1032 return kvm_vcpu_ioctl(env, KVM_NMI);
1033 #else
1034 return -ENOSYS;
1035 #endif
1038 int kvm_init_coalesced_mmio(kvm_context_t kvm)
1040 int r = 0;
1041 kvm_state->coalesced_mmio = 0;
1042 #ifdef KVM_CAP_COALESCED_MMIO
1043 r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_MMIO);
1044 if (r > 0) {
1045 kvm_state->coalesced_mmio = r;
1046 return 0;
1048 #endif
1049 return r;
1052 #ifdef KVM_CAP_DEVICE_ASSIGNMENT
1053 int kvm_assign_pci_device(kvm_context_t kvm,
1054 struct kvm_assigned_pci_dev *assigned_dev)
1056 return kvm_vm_ioctl(kvm_state, KVM_ASSIGN_PCI_DEVICE, assigned_dev);
1059 static int kvm_old_assign_irq(kvm_context_t kvm,
1060 struct kvm_assigned_irq *assigned_irq)
1062 return kvm_vm_ioctl(kvm_state, KVM_ASSIGN_IRQ, assigned_irq);
1065 #ifdef KVM_CAP_ASSIGN_DEV_IRQ
1066 int kvm_assign_irq(kvm_context_t kvm, struct kvm_assigned_irq *assigned_irq)
1068 int ret;
1070 ret = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_ASSIGN_DEV_IRQ);
1071 if (ret > 0) {
1072 return kvm_vm_ioctl(kvm_state, KVM_ASSIGN_DEV_IRQ, assigned_irq);
1075 return kvm_old_assign_irq(kvm, assigned_irq);
1078 int kvm_deassign_irq(kvm_context_t kvm, struct kvm_assigned_irq *assigned_irq)
1080 return kvm_vm_ioctl(kvm_state, KVM_DEASSIGN_DEV_IRQ, assigned_irq);
1082 #else
1083 int kvm_assign_irq(kvm_context_t kvm, struct kvm_assigned_irq *assigned_irq)
1085 return kvm_old_assign_irq(kvm, assigned_irq);
1087 #endif
1088 #endif
1090 #ifdef KVM_CAP_DEVICE_DEASSIGNMENT
1091 int kvm_deassign_pci_device(kvm_context_t kvm,
1092 struct kvm_assigned_pci_dev *assigned_dev)
1094 return kvm_vm_ioctl(kvm_state, KVM_DEASSIGN_PCI_DEVICE, assigned_dev);
1096 #endif
1098 int kvm_destroy_memory_region_works(kvm_context_t kvm)
1100 int ret = 0;
1102 #ifdef KVM_CAP_DESTROY_MEMORY_REGION_WORKS
1103 ret =
1104 kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION,
1105 KVM_CAP_DESTROY_MEMORY_REGION_WORKS);
1106 if (ret <= 0)
1107 ret = 0;
1108 #endif
1109 return ret;
1112 int kvm_reinject_control(kvm_context_t kvm, int pit_reinject)
1114 #ifdef KVM_CAP_REINJECT_CONTROL
1115 int r;
1116 struct kvm_reinject_control control;
1118 control.pit_reinject = pit_reinject;
1120 r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_REINJECT_CONTROL);
1121 if (r > 0) {
1122 return kvm_vm_ioctl(kvm_state, KVM_REINJECT_CONTROL, &control);
1124 #endif
1125 return -ENOSYS;
1128 int kvm_has_gsi_routing(kvm_context_t kvm)
1130 int r = 0;
1132 #ifdef KVM_CAP_IRQ_ROUTING
1133 r = kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
1134 #endif
1135 return r;
1138 int kvm_get_gsi_count(kvm_context_t kvm)
1140 #ifdef KVM_CAP_IRQ_ROUTING
1141 return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
1142 #else
1143 return -EINVAL;
1144 #endif
1147 int kvm_clear_gsi_routes(kvm_context_t kvm)
1149 #ifdef KVM_CAP_IRQ_ROUTING
1150 kvm->irq_routes->nr = 0;
1151 return 0;
1152 #else
1153 return -EINVAL;
1154 #endif
1157 int kvm_add_routing_entry(kvm_context_t kvm,
1158 struct kvm_irq_routing_entry *entry)
1160 #ifdef KVM_CAP_IRQ_ROUTING
1161 struct kvm_irq_routing *z;
1162 struct kvm_irq_routing_entry *new;
1163 int n, size;
1165 if (kvm->irq_routes->nr == kvm->nr_allocated_irq_routes) {
1166 n = kvm->nr_allocated_irq_routes * 2;
1167 if (n < 64)
1168 n = 64;
1169 size = sizeof(struct kvm_irq_routing);
1170 size += n * sizeof(*new);
1171 z = realloc(kvm->irq_routes, size);
1172 if (!z)
1173 return -ENOMEM;
1174 kvm->nr_allocated_irq_routes = n;
1175 kvm->irq_routes = z;
1177 n = kvm->irq_routes->nr++;
1178 new = &kvm->irq_routes->entries[n];
1179 memset(new, 0, sizeof(*new));
1180 new->gsi = entry->gsi;
1181 new->type = entry->type;
1182 new->flags = entry->flags;
1183 new->u = entry->u;
1185 set_gsi(kvm, entry->gsi);
1187 return 0;
1188 #else
1189 return -ENOSYS;
1190 #endif
1193 int kvm_add_irq_route(kvm_context_t kvm, int gsi, int irqchip, int pin)
1195 #ifdef KVM_CAP_IRQ_ROUTING
1196 struct kvm_irq_routing_entry e;
1198 e.gsi = gsi;
1199 e.type = KVM_IRQ_ROUTING_IRQCHIP;
1200 e.flags = 0;
1201 e.u.irqchip.irqchip = irqchip;
1202 e.u.irqchip.pin = pin;
1203 return kvm_add_routing_entry(kvm, &e);
1204 #else
1205 return -ENOSYS;
1206 #endif
1209 int kvm_del_routing_entry(kvm_context_t kvm,
1210 struct kvm_irq_routing_entry *entry)
1212 #ifdef KVM_CAP_IRQ_ROUTING
1213 struct kvm_irq_routing_entry *e, *p;
1214 int i, gsi, found = 0;
1216 gsi = entry->gsi;
1218 for (i = 0; i < kvm->irq_routes->nr; ++i) {
1219 e = &kvm->irq_routes->entries[i];
1220 if (e->type == entry->type && e->gsi == gsi) {
1221 switch (e->type) {
1222 case KVM_IRQ_ROUTING_IRQCHIP:{
1223 if (e->u.irqchip.irqchip ==
1224 entry->u.irqchip.irqchip
1225 && e->u.irqchip.pin == entry->u.irqchip.pin) {
1226 p = &kvm->irq_routes->entries[--kvm->irq_routes->nr];
1227 *e = *p;
1228 found = 1;
1230 break;
1232 case KVM_IRQ_ROUTING_MSI:{
1233 if (e->u.msi.address_lo ==
1234 entry->u.msi.address_lo
1235 && e->u.msi.address_hi ==
1236 entry->u.msi.address_hi
1237 && e->u.msi.data == entry->u.msi.data) {
1238 p = &kvm->irq_routes->entries[--kvm->irq_routes->nr];
1239 *e = *p;
1240 found = 1;
1242 break;
1244 default:
1245 break;
1247 if (found) {
1248 /* If there are no other users of this GSI
1249 * mark it available in the bitmap */
1250 for (i = 0; i < kvm->irq_routes->nr; i++) {
1251 e = &kvm->irq_routes->entries[i];
1252 if (e->gsi == gsi)
1253 break;
1255 if (i == kvm->irq_routes->nr)
1256 clear_gsi(kvm, gsi);
1258 return 0;
1262 return -ESRCH;
1263 #else
1264 return -ENOSYS;
1265 #endif
1268 int kvm_update_routing_entry(kvm_context_t kvm,
1269 struct kvm_irq_routing_entry *entry,
1270 struct kvm_irq_routing_entry *newentry)
1272 #ifdef KVM_CAP_IRQ_ROUTING
1273 struct kvm_irq_routing_entry *e;
1274 int i;
1276 if (entry->gsi != newentry->gsi || entry->type != newentry->type) {
1277 return -EINVAL;
1280 for (i = 0; i < kvm->irq_routes->nr; ++i) {
1281 e = &kvm->irq_routes->entries[i];
1282 if (e->type != entry->type || e->gsi != entry->gsi) {
1283 continue;
1285 switch (e->type) {
1286 case KVM_IRQ_ROUTING_IRQCHIP:
1287 if (e->u.irqchip.irqchip == entry->u.irqchip.irqchip &&
1288 e->u.irqchip.pin == entry->u.irqchip.pin) {
1289 memcpy(&e->u.irqchip, &newentry->u.irqchip,
1290 sizeof e->u.irqchip);
1291 return 0;
1293 break;
1294 case KVM_IRQ_ROUTING_MSI:
1295 if (e->u.msi.address_lo == entry->u.msi.address_lo &&
1296 e->u.msi.address_hi == entry->u.msi.address_hi &&
1297 e->u.msi.data == entry->u.msi.data) {
1298 memcpy(&e->u.msi, &newentry->u.msi, sizeof e->u.msi);
1299 return 0;
1301 break;
1302 default:
1303 break;
1306 return -ESRCH;
1307 #else
1308 return -ENOSYS;
1309 #endif
1312 int kvm_del_irq_route(kvm_context_t kvm, int gsi, int irqchip, int pin)
1314 #ifdef KVM_CAP_IRQ_ROUTING
1315 struct kvm_irq_routing_entry e;
1317 e.gsi = gsi;
1318 e.type = KVM_IRQ_ROUTING_IRQCHIP;
1319 e.flags = 0;
1320 e.u.irqchip.irqchip = irqchip;
1321 e.u.irqchip.pin = pin;
1322 return kvm_del_routing_entry(kvm, &e);
1323 #else
1324 return -ENOSYS;
1325 #endif
1328 int kvm_commit_irq_routes(kvm_context_t kvm)
1330 #ifdef KVM_CAP_IRQ_ROUTING
1331 kvm->irq_routes->flags = 0;
1332 return kvm_vm_ioctl(kvm_state, KVM_SET_GSI_ROUTING, kvm->irq_routes);
1333 #else
1334 return -ENOSYS;
1335 #endif
1338 int kvm_get_irq_route_gsi(kvm_context_t kvm)
1340 int i, bit;
1341 uint32_t *buf = kvm->used_gsi_bitmap;
1343 /* Return the lowest unused GSI in the bitmap */
1344 for (i = 0; i < kvm->max_gsi / 32; i++) {
1345 bit = ffs(~buf[i]);
1346 if (!bit)
1347 continue;
1349 return bit - 1 + i * 32;
1352 return -ENOSPC;
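/*
 * Illustrative sketch (disabled): typical use of the routing helpers above
 * when wiring up an MSI vector.  The function name and the way the MSI
 * address/data values are obtained are invented here; real callers read them
 * from the device's MSI capability.
 */
#if 0
static int example_route_msi(kvm_context_t kvm, uint32_t addr_lo,
                             uint32_t addr_hi, uint32_t data)
{
    struct kvm_irq_routing_entry e;
    int r, gsi;

    gsi = kvm_get_irq_route_gsi(kvm);       /* lowest free GSI, or -ENOSPC */
    if (gsi < 0)
        return gsi;

    memset(&e, 0, sizeof(e));
    e.gsi = gsi;
    e.type = KVM_IRQ_ROUTING_MSI;
    e.u.msi.address_lo = addr_lo;
    e.u.msi.address_hi = addr_hi;
    e.u.msi.data = data;

    r = kvm_add_routing_entry(kvm, &e);     /* also marks the GSI as used */
    if (r < 0)
        return r;

    r = kvm_commit_irq_routes(kvm);         /* push the table to the kernel */
    return r < 0 ? r : gsi;
}
#endif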
1355 #ifdef KVM_CAP_DEVICE_MSIX
1356 int kvm_assign_set_msix_nr(kvm_context_t kvm,
1357 struct kvm_assigned_msix_nr *msix_nr)
1359 return kvm_vm_ioctl(kvm_state, KVM_ASSIGN_SET_MSIX_NR, msix_nr);
1362 int kvm_assign_set_msix_entry(kvm_context_t kvm,
1363 struct kvm_assigned_msix_entry *entry)
1365 return kvm_vm_ioctl(kvm_state, KVM_ASSIGN_SET_MSIX_ENTRY, entry);
1367 #endif
1369 #if defined(KVM_CAP_IRQFD) && defined(CONFIG_EVENTFD)
1371 #include <sys/eventfd.h>
1373 static int _kvm_irqfd(kvm_context_t kvm, int fd, int gsi, int flags)
1375 struct kvm_irqfd data = {
1376 .fd = fd,
1377 .gsi = gsi,
1378 .flags = flags,
1379 };
1381 return kvm_vm_ioctl(kvm_state, KVM_IRQFD, &data);
1384 int kvm_irqfd(kvm_context_t kvm, int gsi, int flags)
1386 int r;
1387 int fd;
1389 if (!kvm_check_extension(kvm_state, KVM_CAP_IRQFD))
1390 return -ENOENT;
1392 fd = eventfd(0, 0);
1393 if (fd < 0)
1394 return -errno;
1396 r = _kvm_irqfd(kvm, fd, gsi, 0);
1397 if (r < 0) {
1398 close(fd);
1399 return -errno;
1402 return fd;
1405 #else /* KVM_CAP_IRQFD */
1407 int kvm_irqfd(kvm_context_t kvm, int gsi, int flags)
1409 return -ENOSYS;
1412 #endif /* KVM_CAP_IRQFD */
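/*
 * Illustrative sketch (disabled): kvm_irqfd() above hands back an eventfd
 * that the kernel binds to the given GSI; writing any non-zero count to it
 * injects that interrupt.  Keeping the fd in a static is only for brevity
 * here, not a pattern taken from this file.
 */
#if 0
static void example_signal_guest_irq(kvm_context_t kvm, int gsi)
{
    static int fd = -1;
    uint64_t one = 1;

    if (fd < 0)
        fd = kvm_irqfd(kvm, gsi, 0);        /* negative if irqfd unsupported */
    if (fd >= 0 && write(fd, &one, sizeof(one)) != sizeof(one))
        perror("irqfd write");
}
#endif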
1413 static inline unsigned long kvm_get_thread_id(void)
1415 return syscall(SYS_gettid);
1418 static void qemu_cond_wait(pthread_cond_t *cond)
1420 CPUState *env = cpu_single_env;
1422 pthread_cond_wait(cond, &qemu_mutex);
1423 cpu_single_env = env;
1426 static void sig_ipi_handler(int n)
1430 static void hardware_memory_error(void)
1432 fprintf(stderr, "Hardware memory error!\n");
1433 exit(1);
1436 static void sigbus_reraise(void)
1438 sigset_t set;
1439 struct sigaction action;
1441 memset(&action, 0, sizeof(action));
1442 action.sa_handler = SIG_DFL;
1443 if (!sigaction(SIGBUS, &action, NULL)) {
1444 raise(SIGBUS);
1445 sigemptyset(&set);
1446 sigaddset(&set, SIGBUS);
1447 sigprocmask(SIG_UNBLOCK, &set, NULL);
1449 perror("Failed to re-raise SIGBUS!");
1450 abort();
1453 static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
1454 void *ctx)
1456 #if defined(KVM_CAP_MCE) && defined(TARGET_I386)
1457 if (first_cpu->mcg_cap && siginfo->ssi_addr
1458 && siginfo->ssi_code == BUS_MCEERR_AO) {
1459 uint64_t status;
1460 unsigned long paddr;
1461 CPUState *cenv;
1463 /* Hope we are lucky for AO MCE */
1464 if (do_qemu_ram_addr_from_host((void *)(intptr_t)siginfo->ssi_addr,
1465 &paddr)) {
1466 fprintf(stderr, "Hardware memory error for memory used by "
1467 "QEMU itself instead of guest system!: %llx\n",
1468 (unsigned long long)siginfo->ssi_addr);
1469 return;
1471 status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
1472 | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
1473 | 0xc0;
1474 kvm_inject_x86_mce(first_cpu, 9, status,
1475 MCG_STATUS_MCIP | MCG_STATUS_RIPV, paddr,
1476 (MCM_ADDR_PHYS << 6) | 0xc, 1);
1477 for (cenv = first_cpu->next_cpu; cenv != NULL; cenv = cenv->next_cpu)
1478 kvm_inject_x86_mce(cenv, 1, MCI_STATUS_VAL | MCI_STATUS_UC,
1479 MCG_STATUS_MCIP | MCG_STATUS_RIPV, 0, 0, 1);
1480 } else
1481 #endif
1483 if (siginfo->ssi_code == BUS_MCEERR_AO)
1484 return;
1485 else if (siginfo->ssi_code == BUS_MCEERR_AR)
1486 hardware_memory_error();
1487 else
1488 sigbus_reraise();
1492 static void on_vcpu(CPUState *env, void (*func)(void *data), void *data)
1494 struct qemu_work_item wi;
1496 if (env == current_env) {
1497 func(data);
1498 return;
1501 wi.func = func;
1502 wi.data = data;
1503 if (!env->kvm_cpu_state.queued_work_first)
1504 env->kvm_cpu_state.queued_work_first = &wi;
1505 else
1506 env->kvm_cpu_state.queued_work_last->next = &wi;
1507 env->kvm_cpu_state.queued_work_last = &wi;
1508 wi.next = NULL;
1509 wi.done = false;
1511 pthread_kill(env->kvm_cpu_state.thread, SIG_IPI);
1512 while (!wi.done)
1513 qemu_cond_wait(&qemu_work_cond);
1516 void kvm_arch_get_registers(CPUState *env)
1518 kvm_arch_save_regs(env);
1521 static void do_kvm_cpu_synchronize_state(void *_env)
1523 CPUState *env = _env;
1524 if (!env->kvm_cpu_state.regs_modified) {
1525 kvm_arch_get_registers(env);
1526 env->kvm_cpu_state.regs_modified = 1;
1530 void kvm_cpu_synchronize_state(CPUState *env)
1532 if (!env->kvm_cpu_state.regs_modified)
1533 on_vcpu(env, do_kvm_cpu_synchronize_state, env);
1536 static void inject_interrupt(void *data)
1538 cpu_interrupt(current_env, (long) data);
1541 void kvm_inject_interrupt(CPUState *env, int mask)
1543 on_vcpu(env, inject_interrupt, (void *) (long) mask);
1546 void kvm_update_interrupt_request(CPUState *env)
1548 int signal = 0;
1550 if (env) {
1551 if (!current_env || !current_env->created)
1552 signal = 1;
1553 /*
1554 * Testing for created here is really redundant
1555 */
1556 if (current_env && current_env->created &&
1557 env != current_env && !env->kvm_cpu_state.signalled)
1558 signal = 1;
1560 if (signal) {
1561 env->kvm_cpu_state.signalled = 1;
1562 if (env->kvm_cpu_state.thread)
1563 pthread_kill(env->kvm_cpu_state.thread, SIG_IPI);
1568 static void kvm_do_load_registers(void *_env)
1570 CPUState *env = _env;
1572 kvm_arch_load_regs(env);
1575 void kvm_load_registers(CPUState *env)
1577 if (kvm_enabled() && qemu_system_ready)
1578 on_vcpu(env, kvm_do_load_registers, env);
1581 static void kvm_do_save_registers(void *_env)
1583 CPUState *env = _env;
1585 kvm_arch_save_regs(env);
1588 void kvm_save_registers(CPUState *env)
1590 if (kvm_enabled())
1591 on_vcpu(env, kvm_do_save_registers, env);
1594 static void kvm_do_load_mpstate(void *_env)
1596 CPUState *env = _env;
1598 kvm_arch_load_mpstate(env);
1601 void kvm_load_mpstate(CPUState *env)
1603 if (kvm_enabled() && qemu_system_ready)
1604 on_vcpu(env, kvm_do_load_mpstate, env);
1607 static void kvm_do_save_mpstate(void *_env)
1609 CPUState *env = _env;
1611 kvm_arch_save_mpstate(env);
1612 #ifdef KVM_CAP_MP_STATE
1613 if (kvm_irqchip_in_kernel())
1614 env->halted = (env->mp_state == KVM_MP_STATE_HALTED);
1615 #endif
1618 void kvm_save_mpstate(CPUState *env)
1620 if (kvm_enabled())
1621 on_vcpu(env, kvm_do_save_mpstate, env);
1624 int kvm_cpu_exec(CPUState *env)
1626 int r;
1628 r = kvm_run(env);
1629 if (r < 0) {
1630 printf("kvm_run returned %d\n", r);
1631 vm_stop(0);
1634 return 0;
1637 static int is_cpu_stopped(CPUState *env)
1639 return !vm_running || env->stopped;
1642 static void flush_queued_work(CPUState *env)
1644 struct qemu_work_item *wi;
1646 if (!env->kvm_cpu_state.queued_work_first)
1647 return;
1649 while ((wi = env->kvm_cpu_state.queued_work_first)) {
1650 env->kvm_cpu_state.queued_work_first = wi->next;
1651 wi->func(wi->data);
1652 wi->done = true;
1654 env->kvm_cpu_state.queued_work_last = NULL;
1655 pthread_cond_broadcast(&qemu_work_cond);
1658 static void kvm_on_sigbus(CPUState *env, siginfo_t *siginfo)
1660 #if defined(KVM_CAP_MCE) && defined(TARGET_I386)
1661 struct kvm_x86_mce mce = {
1662 .bank = 9,
1663 };
1664 unsigned long paddr;
1665 int r;
1667 if (env->mcg_cap && siginfo->si_addr
1668 && (siginfo->si_code == BUS_MCEERR_AR
1669 || siginfo->si_code == BUS_MCEERR_AO)) {
1670 if (siginfo->si_code == BUS_MCEERR_AR) {
1671 /* Fake an Intel architectural Data Load SRAR UCR */
1672 mce.status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
1673 | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
1674 | MCI_STATUS_AR | 0x134;
1675 mce.misc = (MCM_ADDR_PHYS << 6) | 0xc;
1676 mce.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_EIPV;
1677 } else {
1678 /* Fake an Intel architectural Memory scrubbing UCR */
1679 mce.status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
1680 | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
1681 | 0xc0;
1682 mce.misc = (MCM_ADDR_PHYS << 6) | 0xc;
1683 mce.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
1685 if (do_qemu_ram_addr_from_host((void *)siginfo->si_addr, &paddr)) {
1686 fprintf(stderr, "Hardware memory error for memory used by "
1687 "QEMU itself instaed of guest system!\n");
1688 /* Hope we are lucky for AO MCE */
1689 if (siginfo->si_code == BUS_MCEERR_AO)
1690 return;
1691 else
1692 hardware_memory_error();
1694 mce.addr = paddr;
1695 r = kvm_set_mce(env, &mce);
1696 if (r < 0) {
1697 fprintf(stderr, "kvm_set_mce: %s\n", strerror(errno));
1698 abort();
1700 } else
1701 #endif
1703 if (siginfo->si_code == BUS_MCEERR_AO)
1704 return;
1705 else if (siginfo->si_code == BUS_MCEERR_AR)
1706 hardware_memory_error();
1707 else
1708 sigbus_reraise();
1712 static void kvm_main_loop_wait(CPUState *env, int timeout)
1714 struct timespec ts;
1715 int r, e;
1716 siginfo_t siginfo;
1717 sigset_t waitset;
1718 sigset_t chkset;
1720 ts.tv_sec = timeout / 1000;
1721 ts.tv_nsec = (timeout % 1000) * 1000000;
1722 sigemptyset(&waitset);
1723 sigaddset(&waitset, SIG_IPI);
1724 sigaddset(&waitset, SIGBUS);
1726 do {
1727 pthread_mutex_unlock(&qemu_mutex);
1729 r = sigtimedwait(&waitset, &siginfo, &ts);
1730 e = errno;
1732 pthread_mutex_lock(&qemu_mutex);
1734 if (r == -1 && !(e == EAGAIN || e == EINTR)) {
1735 printf("sigtimedwait: %s\n", strerror(e));
1736 exit(1);
1739 switch (r) {
1740 case SIGBUS:
1741 kvm_on_sigbus(env, &siginfo);
1742 break;
1743 default:
1744 break;
1747 r = sigpending(&chkset);
1748 if (r == -1) {
1749 printf("sigpending: %s\n", strerror(errno));
1750 exit(1);
1752 } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
1754 cpu_single_env = env;
1755 flush_queued_work(env);
1757 if (env->stop) {
1758 env->stop = 0;
1759 env->stopped = 1;
1760 pthread_cond_signal(&qemu_pause_cond);
1763 env->kvm_cpu_state.signalled = 0;
1766 static int all_threads_paused(void)
1768 CPUState *penv = first_cpu;
1770 while (penv) {
1771 if (penv->stop)
1772 return 0;
1773 penv = (CPUState *) penv->next_cpu;
1776 return 1;
1779 static void pause_all_threads(void)
1781 CPUState *penv = first_cpu;
1783 while (penv) {
1784 if (penv != cpu_single_env) {
1785 penv->stop = 1;
1786 pthread_kill(penv->kvm_cpu_state.thread, SIG_IPI);
1787 } else {
1788 penv->stop = 0;
1789 penv->stopped = 1;
1790 cpu_exit(penv);
1792 penv = (CPUState *) penv->next_cpu;
1795 while (!all_threads_paused())
1796 qemu_cond_wait(&qemu_pause_cond);
1799 static void resume_all_threads(void)
1801 CPUState *penv = first_cpu;
1803 assert(!cpu_single_env);
1805 while (penv) {
1806 penv->stop = 0;
1807 penv->stopped = 0;
1808 pthread_kill(penv->kvm_cpu_state.thread, SIG_IPI);
1809 penv = (CPUState *) penv->next_cpu;
1813 static void kvm_vm_state_change_handler(void *context, int running, int reason)
1815 if (running)
1816 resume_all_threads();
1817 else
1818 pause_all_threads();
1821 static void setup_kernel_sigmask(CPUState *env)
1823 sigset_t set;
1825 sigemptyset(&set);
1826 sigaddset(&set, SIGUSR2);
1827 sigaddset(&set, SIGIO);
1828 sigaddset(&set, SIGALRM);
1829 sigprocmask(SIG_BLOCK, &set, NULL);
1831 sigprocmask(SIG_BLOCK, NULL, &set);
1832 sigdelset(&set, SIG_IPI);
1833 sigdelset(&set, SIGBUS);
1835 kvm_set_signal_mask(env, &set);
1838 static void qemu_kvm_system_reset(void)
1840 CPUState *penv = first_cpu;
1842 pause_all_threads();
1844 qemu_system_reset();
1846 while (penv) {
1847 kvm_arch_cpu_reset(penv);
1848 penv = (CPUState *) penv->next_cpu;
1851 resume_all_threads();
1854 static void process_irqchip_events(CPUState *env)
1856 kvm_arch_process_irqchip_events(env);
1857 if (kvm_arch_has_work(env))
1858 env->halted = 0;
1861 static int kvm_main_loop_cpu(CPUState *env)
1863 while (1) {
1864 int run_cpu = !is_cpu_stopped(env);
1865 if (run_cpu && !kvm_irqchip_in_kernel()) {
1866 process_irqchip_events(env);
1867 run_cpu = !env->halted;
1869 if (run_cpu) {
1870 kvm_main_loop_wait(env, 0);
1871 kvm_cpu_exec(env);
1872 } else {
1873 kvm_main_loop_wait(env, 1000);
1876 pthread_mutex_unlock(&qemu_mutex);
1877 return 0;
1880 static void *ap_main_loop(void *_env)
1882 CPUState *env = _env;
1883 sigset_t signals;
1884 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
1885 struct ioperm_data *data = NULL;
1886 #endif
1888 current_env = env;
1889 env->thread_id = kvm_get_thread_id();
1890 sigfillset(&signals);
1891 sigprocmask(SIG_BLOCK, &signals, NULL);
1892 kvm_create_vcpu(env, env->cpu_index);
1894 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
1895 /* do ioperm for io ports of assigned devices */
1896 QLIST_FOREACH(data, &ioperm_head, entries)
1897 on_vcpu(env, kvm_arch_do_ioperm, data);
1898 #endif
1900 setup_kernel_sigmask(env);
1902 pthread_mutex_lock(&qemu_mutex);
1903 cpu_single_env = env;
1905 kvm_arch_init_vcpu(env);
1907 kvm_arch_load_regs(env);
1909 /* signal VCPU creation */
1910 current_env->created = 1;
1911 pthread_cond_signal(&qemu_vcpu_cond);
1913 /* and wait for machine initialization */
1914 while (!qemu_system_ready)
1915 qemu_cond_wait(&qemu_system_cond);
1917 /* re-initialize cpu_single_env after re-acquiring qemu_mutex */
1918 cpu_single_env = env;
1920 kvm_main_loop_cpu(env);
1921 return NULL;
1924 void kvm_init_vcpu(CPUState *env)
1926 pthread_create(&env->kvm_cpu_state.thread, NULL, ap_main_loop, env);
1928 while (env->created == 0)
1929 qemu_cond_wait(&qemu_vcpu_cond);
1932 int kvm_vcpu_inited(CPUState *env)
1934 return env->created;
1937 #ifdef TARGET_I386
1938 void kvm_hpet_disable_kpit(void)
1940 struct kvm_pit_state2 ps2;
1942 kvm_get_pit2(kvm_context, &ps2);
1943 ps2.flags |= KVM_PIT_FLAGS_HPET_LEGACY;
1944 kvm_set_pit2(kvm_context, &ps2);
1947 void kvm_hpet_enable_kpit(void)
1949 struct kvm_pit_state2 ps2;
1951 kvm_get_pit2(kvm_context, &ps2);
1952 ps2.flags &= ~KVM_PIT_FLAGS_HPET_LEGACY;
1953 kvm_set_pit2(kvm_context, &ps2);
1955 #endif
1957 int kvm_init_ap(void)
1959 struct sigaction action;
1961 qemu_add_vm_change_state_handler(kvm_vm_state_change_handler, NULL);
1963 signal(SIG_IPI, sig_ipi_handler);
1965 memset(&action, 0, sizeof(action));
1966 action.sa_flags = SA_SIGINFO;
1967 action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
1968 sigaction(SIGBUS, &action, NULL);
1969 prctl(PR_MCE_KILL, 1, 1);
1970 return 0;
1973 void qemu_kvm_notify_work(void)
1975 uint64_t value = 1;
1976 char buffer[8];
1977 size_t offset = 0;
1979 if (io_thread_fd == -1)
1980 return;
1982 memcpy(buffer, &value, sizeof(value));
1984 while (offset < 8) {
1985 ssize_t len;
1987 len = write(io_thread_fd, buffer + offset, 8 - offset);
1988 if (len == -1 && errno == EINTR)
1989 continue;
1991 /* In case we have a pipe, there is no reason to insist on writing
1992 * 8 bytes
1993 */
1994 if (len == -1 && errno == EAGAIN)
1995 break;
1997 if (len <= 0)
1998 break;
2000 offset += len;
2004 /* If we have signalfd, we mask out the signals we want to handle and then
2005 * use signalfd to listen for them. We rely on whatever the current signal
2006 * handler is to dispatch the signals when we receive them.
2007 */
2009 static void sigfd_handler(void *opaque)
2011 int fd = (unsigned long) opaque;
2012 struct qemu_signalfd_siginfo info;
2013 struct sigaction action;
2014 ssize_t len;
2016 while (1) {
2017 do {
2018 len = read(fd, &info, sizeof(info));
2019 } while (len == -1 && errno == EINTR);
2021 if (len == -1 && errno == EAGAIN)
2022 break;
2024 if (len != sizeof(info)) {
2025 printf("read from sigfd returned %zd: %m\n", len);
2026 return;
2029 sigaction(info.ssi_signo, NULL, &action);
2030 if ((action.sa_flags & SA_SIGINFO) && action.sa_sigaction)
2031 action.sa_sigaction(info.ssi_signo,
2032 (siginfo_t *)&info, NULL);
2033 else if (action.sa_handler)
2034 action.sa_handler(info.ssi_signo);
2039 /* Used to break IO thread out of select */
2040 static void io_thread_wakeup(void *opaque)
2042 int fd = (unsigned long) opaque;
2043 char buffer[4096];
2045 /* Drain the pipe/(eventfd) */
2046 while (1) {
2047 ssize_t len;
2049 len = read(fd, buffer, sizeof(buffer));
2050 if (len == -1 && errno == EINTR)
2051 continue;
2053 if (len <= 0)
2054 break;
2058 int kvm_main_loop(void)
2060 int fds[2];
2061 sigset_t mask;
2062 int sigfd;
2064 io_thread = pthread_self();
2065 qemu_system_ready = 1;
2067 if (qemu_eventfd(fds) == -1) {
2068 fprintf(stderr, "failed to create eventfd\n");
2069 return -errno;
2072 fcntl(fds[0], F_SETFL, O_NONBLOCK);
2073 fcntl(fds[1], F_SETFL, O_NONBLOCK);
2075 qemu_set_fd_handler2(fds[0], NULL, io_thread_wakeup, NULL,
2076 (void *)(unsigned long) fds[0]);
2078 io_thread_fd = fds[1];
2080 sigemptyset(&mask);
2081 sigaddset(&mask, SIGIO);
2082 sigaddset(&mask, SIGALRM);
2083 sigaddset(&mask, SIGBUS);
2084 sigprocmask(SIG_BLOCK, &mask, NULL);
2086 sigfd = qemu_signalfd(&mask);
2087 if (sigfd == -1) {
2088 fprintf(stderr, "failed to create signalfd\n");
2089 return -errno;
2092 fcntl(sigfd, F_SETFL, O_NONBLOCK);
2094 qemu_set_fd_handler2(sigfd, NULL, sigfd_handler, NULL,
2095 (void *)(unsigned long) sigfd);
2097 pthread_cond_broadcast(&qemu_system_cond);
2099 io_thread_sigfd = sigfd;
2100 cpu_single_env = NULL;
2102 while (1) {
2103 main_loop_wait(1000);
2104 if (qemu_shutdown_requested()) {
2105 if (qemu_no_shutdown()) {
2106 vm_stop(0);
2107 } else
2108 break;
2109 } else if (qemu_powerdown_requested())
2110 qemu_irq_raise(qemu_system_powerdown);
2111 else if (qemu_reset_requested())
2112 qemu_kvm_system_reset();
2113 else if (kvm_debug_cpu_requested) {
2114 gdb_set_stop_cpu(kvm_debug_cpu_requested);
2115 vm_stop(EXCP_DEBUG);
2116 kvm_debug_cpu_requested = NULL;
2120 pause_all_threads();
2121 pthread_mutex_unlock(&qemu_mutex);
2123 return 0;
2126 #ifdef TARGET_I386
2127 static int destroy_region_works = 0;
2128 #endif
2131 #if !defined(TARGET_I386)
2132 int kvm_arch_init_irq_routing(void)
2134 return 0;
2136 #endif
2138 extern int no_hpet;
2140 static int kvm_create_context(void)
2142 int r;
2144 if (!kvm_irqchip) {
2145 kvm_disable_irqchip_creation(kvm_context);
2147 if (!kvm_pit) {
2148 kvm_disable_pit_creation(kvm_context);
2150 if (kvm_create(kvm_context, 0, NULL) < 0) {
2151 kvm_finalize(kvm_state);
2152 return -1;
2154 r = kvm_arch_qemu_create_context();
2155 if (r < 0)
2156 kvm_finalize(kvm_state);
2157 if (kvm_pit && !kvm_pit_reinject) {
2158 if (kvm_reinject_control(kvm_context, 0)) {
2159 fprintf(stderr, "failure to disable in-kernel PIT reinjection\n");
2160 return -1;
2163 #ifdef TARGET_I386
2164 destroy_region_works = kvm_destroy_memory_region_works(kvm_context);
2165 #endif
2167 r = kvm_arch_init_irq_routing();
2168 if (r < 0) {
2169 return r;
2172 kvm_init_ap();
2173 if (kvm_irqchip) {
2174 if (!qemu_kvm_has_gsi_routing()) {
2175 irq0override = 0;
2176 #ifdef TARGET_I386
2177 /* if kernel can't do irq routing, interrupt source
2178 * override 0->2 cannot be set up as required by hpet,
2179 * so disable hpet.
2180 */
2181 no_hpet = 1;
2182 } else if (!qemu_kvm_has_pit_state2()) {
2183 no_hpet = 1;
2185 #else
2187 #endif
2190 return 0;
2193 #ifdef TARGET_I386
2194 static int must_use_aliases_source(target_phys_addr_t addr)
2196 if (destroy_region_works)
2197 return false;
2198 if (addr == 0xa0000 || addr == 0xa8000)
2199 return true;
2200 return false;
2203 static int must_use_aliases_target(target_phys_addr_t addr)
2205 if (destroy_region_works)
2206 return false;
2207 if (addr >= 0xe0000000 && addr < 0x100000000ull)
2208 return true;
2209 return false;
2212 static struct mapping {
2213 target_phys_addr_t phys;
2214 ram_addr_t ram;
2215 ram_addr_t len;
2216 } mappings[50];
2217 static int nr_mappings;
2219 static struct mapping *find_ram_mapping(ram_addr_t ram_addr)
2221 struct mapping *p;
2223 for (p = mappings; p < mappings + nr_mappings; ++p) {
2224 if (p->ram <= ram_addr && ram_addr < p->ram + p->len) {
2225 return p;
2228 return NULL;
2231 static struct mapping *find_mapping(target_phys_addr_t start_addr)
2233 struct mapping *p;
2235 for (p = mappings; p < mappings + nr_mappings; ++p) {
2236 if (p->phys <= start_addr && start_addr < p->phys + p->len) {
2237 return p;
2240 return NULL;
2243 static void drop_mapping(target_phys_addr_t start_addr)
2245 struct mapping *p = find_mapping(start_addr);
2247 if (p)
2248 *p = mappings[--nr_mappings];
2250 #endif
2252 void kvm_set_phys_mem(target_phys_addr_t start_addr, ram_addr_t size,
2253 ram_addr_t phys_offset)
2255 int r = 0;
2256 unsigned long area_flags;
2257 #ifdef TARGET_I386
2258 struct mapping *p;
2259 #endif
2261 if (start_addr + size > phys_ram_size) {
2262 phys_ram_size = start_addr + size;
2265 phys_offset &= ~IO_MEM_ROM;
2266 area_flags = phys_offset & ~TARGET_PAGE_MASK;
2268 if (area_flags != IO_MEM_RAM) {
2269 #ifdef TARGET_I386
2270 if (must_use_aliases_source(start_addr)) {
2271 kvm_destroy_memory_alias(kvm_context, start_addr);
2272 return;
2274 if (must_use_aliases_target(start_addr))
2275 return;
2276 #endif
2277 while (size > 0) {
2278 p = find_mapping(start_addr);
2279 if (p) {
2280 kvm_unregister_memory_area(kvm_context, p->phys, p->len);
2281 drop_mapping(p->phys);
2283 start_addr += TARGET_PAGE_SIZE;
2284 if (size > TARGET_PAGE_SIZE) {
2285 size -= TARGET_PAGE_SIZE;
2286 } else {
2287 size = 0;
2290 return;
2293 r = kvm_is_containing_region(kvm_context, start_addr, size);
2294 if (r)
2295 return;
2297 if (area_flags >= TLB_MMIO)
2298 return;
2300 #ifdef TARGET_I386
2301 if (must_use_aliases_source(start_addr)) {
2302 p = find_ram_mapping(phys_offset);
2303 if (p) {
2304 kvm_create_memory_alias(kvm_context, start_addr, size,
2305 p->phys + (phys_offset - p->ram));
2307 return;
2309 #endif
2311 r = kvm_register_phys_mem(kvm_context, start_addr,
2312 qemu_get_ram_ptr(phys_offset), size, 0);
2313 if (r < 0) {
2314 printf("kvm_cpu_register_physical_memory: failed\n");
2315 exit(1);
2317 #ifdef TARGET_I386
2318 drop_mapping(start_addr);
2319 p = &mappings[nr_mappings++];
2320 p->phys = start_addr;
2321 p->ram = phys_offset;
2322 p->len = size;
2323 #endif
2325 return;
2328 int kvm_setup_guest_memory(void *area, unsigned long size)
2330 int ret = 0;
2332 #ifdef MADV_DONTFORK
2333 if (kvm_enabled() && !kvm_has_sync_mmu())
2334 ret = madvise(area, size, MADV_DONTFORK);
2335 #endif
2337 if (ret)
2338 perror("madvise");
2340 return ret;
2343 #ifdef KVM_CAP_SET_GUEST_DEBUG
2345 struct kvm_set_guest_debug_data {
2346 struct kvm_guest_debug dbg;
2347 int err;
2348 };
2350 static void kvm_invoke_set_guest_debug(void *data)
2352 struct kvm_set_guest_debug_data *dbg_data = data;
2354 if (cpu_single_env->kvm_cpu_state.regs_modified) {
2355 kvm_arch_put_registers(cpu_single_env);
2356 cpu_single_env->kvm_cpu_state.regs_modified = 0;
2358 dbg_data->err =
2359 kvm_set_guest_debug(cpu_single_env,
2360 &dbg_data->dbg);
2363 int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
2365 struct kvm_set_guest_debug_data data;
2367 data.dbg.control = 0;
2368 if (env->singlestep_enabled)
2369 data.dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
2371 kvm_arch_update_guest_debug(env, &data.dbg);
2372 data.dbg.control |= reinject_trap;
2374 on_vcpu(env, kvm_invoke_set_guest_debug, &data);
2375 return data.err;
2378 #endif
2380 /*
2381 * dirty pages logging
2382 */
2383 /* FIXME: use unsigned long pointer instead of unsigned char */
2384 unsigned char *kvm_dirty_bitmap = NULL;
2385 int kvm_physical_memory_set_dirty_tracking(int enable)
2387 int r = 0;
2389 if (!kvm_enabled())
2390 return 0;
2392 if (enable) {
2393 if (!kvm_dirty_bitmap) {
2394 unsigned bitmap_size = BITMAP_SIZE(phys_ram_size);
2395 kvm_dirty_bitmap = qemu_malloc(bitmap_size);
2396 r = kvm_dirty_pages_log_enable_all(kvm_context);
2398 } else {
2399 if (kvm_dirty_bitmap) {
2400 r = kvm_dirty_pages_log_reset(kvm_context);
2401 qemu_free(kvm_dirty_bitmap);
2402 kvm_dirty_bitmap = NULL;
2405 return r;
2408 /* get kvm's dirty pages bitmap and update qemu's */
2409 static int kvm_get_dirty_pages_log_range(unsigned long start_addr,
2410 unsigned char *bitmap,
2411 unsigned long offset,
2412 unsigned long mem_size)
2414 unsigned int i, j, n = 0;
2415 unsigned char c;
2416 unsigned long page_number, addr, addr1;
2417 ram_addr_t ram_addr;
2418 unsigned int len = ((mem_size / TARGET_PAGE_SIZE) + 7) / 8;
2420 /*
2421 * bitmap-traveling is faster than memory-traveling (for addr...)
2422 * especially when most of the memory is not dirty.
2423 */
2424 for (i = 0; i < len; i++) {
2425 c = bitmap[i];
2426 while (c > 0) {
2427 j = ffsl(c) - 1;
2428 c &= ~(1u << j);
2429 page_number = i * 8 + j;
2430 addr1 = page_number * TARGET_PAGE_SIZE;
2431 addr = offset + addr1;
2432 ram_addr = cpu_get_physical_page_desc(addr);
2433 cpu_physical_memory_set_dirty(ram_addr);
2434 n++;
2437 return 0;
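/*
 * Worked example for the loop above: with TARGET_PAGE_SIZE == 4096, a set
 * bit at byte i == 2, bit j == 5 gives page_number == 2 * 8 + 5 == 21, so
 * the page at offset + 21 * 4096 == offset + 0x15000 is marked dirty.
 */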
2440 static int kvm_get_dirty_bitmap_cb(unsigned long start, unsigned long len,
2441 void *bitmap, void *opaque)
2443 return kvm_get_dirty_pages_log_range(start, bitmap, start, len);
2446 /*
2447 * get kvm's dirty pages bitmap and update qemu's
2448 * we only care about physical ram, which resides in slots 0 and 3
2449 */
2450 int kvm_update_dirty_pages_log(void)
2452 int r = 0;
2455 r = kvm_get_dirty_pages_range(kvm_context, 0, -1UL, NULL,
2456 kvm_get_dirty_bitmap_cb);
2457 return r;
2460 void kvm_qemu_log_memory(target_phys_addr_t start, target_phys_addr_t size,
2461 int log)
2463 if (log)
2464 kvm_dirty_pages_log_enable_slot(kvm_context, start, size);
2465 else {
2466 #ifdef TARGET_I386
2467 if (must_use_aliases_target(start))
2468 return;
2469 #endif
2470 kvm_dirty_pages_log_disable_slot(kvm_context, start, size);
2474 #ifdef KVM_CAP_IRQCHIP
2476 int kvm_set_irq(int irq, int level, int *status)
2478 return kvm_set_irq_level(kvm_context, irq, level, status);
2481 #endif
2483 int qemu_kvm_get_dirty_pages(unsigned long phys_addr, void *buf)
2485 return kvm_get_dirty_pages(kvm_context, phys_addr, buf);
2488 void kvm_mutex_unlock(void)
2490 assert(!cpu_single_env);
2491 pthread_mutex_unlock(&qemu_mutex);
2494 void kvm_mutex_lock(void)
2496 pthread_mutex_lock(&qemu_mutex);
2497 cpu_single_env = NULL;
2500 void qemu_mutex_unlock_iothread(void)
2502 if (kvm_enabled())
2503 kvm_mutex_unlock();
2506 void qemu_mutex_lock_iothread(void)
2508 if (kvm_enabled())
2509 kvm_mutex_lock();
2512 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
2513 void kvm_add_ioperm_data(struct ioperm_data *data)
2515 QLIST_INSERT_HEAD(&ioperm_head, data, entries);
2518 void kvm_remove_ioperm_data(unsigned long start_port, unsigned long num)
2520 struct ioperm_data *data;
2522 data = QLIST_FIRST(&ioperm_head);
2523 while (data) {
2524 struct ioperm_data *next = QLIST_NEXT(data, entries);
2526 if (data->start_port == start_port && data->num == num) {
2527 QLIST_REMOVE(data, entries);
2528 qemu_free(data);
2531 data = next;
2535 void kvm_ioperm(CPUState *env, void *data)
2537 if (kvm_enabled() && qemu_system_ready)
2538 on_vcpu(env, kvm_arch_do_ioperm, data);
2541 #endif
2543 int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2544 target_phys_addr_t end_addr)
2546 #ifndef TARGET_IA64
2548 #ifdef TARGET_I386
2549 if (must_use_aliases_source(start_addr))
2550 return 0;
2551 #endif
2553 kvm_get_dirty_pages_range(kvm_context, start_addr,
2554 end_addr - start_addr, NULL,
2555 kvm_get_dirty_bitmap_cb);
2556 #endif
2557 return 0;
2560 int kvm_log_start(target_phys_addr_t phys_addr, target_phys_addr_t len)
2562 #ifdef TARGET_I386
2563 if (must_use_aliases_source(phys_addr))
2564 return 0;
2565 #endif
2567 #ifndef TARGET_IA64
2568 kvm_qemu_log_memory(phys_addr, len, 1);
2569 #endif
2570 return 0;
2573 int kvm_log_stop(target_phys_addr_t phys_addr, target_phys_addr_t len)
2575 #ifdef TARGET_I386
2576 if (must_use_aliases_source(phys_addr))
2577 return 0;
2578 #endif
2580 #ifndef TARGET_IA64
2581 kvm_qemu_log_memory(phys_addr, len, 0);
2582 #endif
2583 return 0;
2586 int kvm_set_boot_cpu_id(uint32_t id)
2588 return kvm_set_boot_vcpu_id(kvm_context, id);
2591 #ifdef TARGET_I386
2592 #ifdef KVM_CAP_MCE
2593 struct kvm_x86_mce_data {
2594 CPUState *env;
2595 struct kvm_x86_mce *mce;
2596 int abort_on_error;
2597 };
2599 static void kvm_do_inject_x86_mce(void *_data)
2601 struct kvm_x86_mce_data *data = _data;
2602 int r;
2604 r = kvm_set_mce(data->env, data->mce);
2605 if (r < 0) {
2606 perror("kvm_set_mce FAILED");
2607 if (data->abort_on_error)
2608 abort();
2611 #endif
2613 void kvm_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
2614 uint64_t mcg_status, uint64_t addr, uint64_t misc,
2615 int abort_on_error)
2617 #ifdef KVM_CAP_MCE
2618 struct kvm_x86_mce mce = {
2619 .bank = bank,
2620 .status = status,
2621 .mcg_status = mcg_status,
2622 .addr = addr,
2623 .misc = misc,
2624 };
2625 struct kvm_x86_mce_data data = {
2626 .env = cenv,
2627 .mce = &mce,
2628 .abort_on_error = abort_on_error,
2629 };
2631 if (!cenv->mcg_cap) {
2632 fprintf(stderr, "MCE support is not enabled!\n");
2633 return;
2635 on_vcpu(cenv, kvm_do_inject_x86_mce, &data);
2636 #else
2637 if (abort_on_error)
2638 abort();
2639 #endif
2641 #endif