/*
 * Kernel-based Virtual Machine control library
 *
 * This library provides an API to control the kvm hardware virtualization
 * module.
 *
 * Copyright (C) 2006 Qumranet
 *
 * Authors:
 *
 *	Avi Kivity   <avi@qumranet.com>
 *	Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the GNU LGPL license, version 2.
 */

#ifndef __user
#define __user /* temporary, until installed via make headers_install */
#endif

#include <linux/kvm.h>

#define EXPECTED_KVM_API_VERSION 12

#if EXPECTED_KVM_API_VERSION != KVM_API_VERSION
#error libkvm: userspace and kernel version mismatch
#endif

#include <unistd.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>	/* uint8_t..uint64_t, used by the I/O and MMIO handlers */
#include <sys/mman.h>
#include <string.h>
#include <errno.h>
#include <sys/ioctl.h>
#include "libkvm.h"
#include "kvm-abi-10.h"

#if defined(__x86_64__) || defined(__i386__)
#include "kvm-x86.h"
#endif

int kvm_abi = EXPECTED_KVM_API_VERSION;

struct slot_info {
	unsigned long phys_addr;
	unsigned long len;
	int user_alloc;
	unsigned long userspace_addr;
};

struct slot_info slots[KVM_MAX_NUM_MEM_REGIONS];

void init_slots(void)
{
	int i;

	for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS; ++i)
		slots[i].len = 0;
}

int get_free_slot(kvm_context_t kvm)
{
	int i;
	int tss_ext;

#ifdef KVM_CAP_SET_TSS_ADDR
	tss_ext = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
#else
	tss_ext = 0;
#endif

	/*
	 * On older kernels where the set tss ioctl is not supported we must
	 * save slot 0 to hold the extended memory, as the vmx will use the
	 * last 3 pages of this slot.
	 */
	if (tss_ext > 0)
		i = 0;
	else
		i = 1;

	for (; i < KVM_MAX_NUM_MEM_REGIONS; ++i)
		if (!slots[i].len)
			return i;
	return -1;
}

void register_slot(int slot, unsigned long phys_addr, unsigned long len,
		   int user_alloc, unsigned long userspace_addr)
{
	slots[slot].phys_addr = phys_addr;
	slots[slot].len = len;
	slots[slot].user_alloc = user_alloc;
	slots[slot].userspace_addr = userspace_addr;
}

void free_slot(int slot)
{
	slots[slot].len = 0;
}

int get_slot(unsigned long phys_addr)
{
	int i;

	for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS; ++i) {
		/* match addresses in [phys_addr, phys_addr + len) */
		if (slots[i].len && slots[i].phys_addr <= phys_addr &&
		    (slots[i].phys_addr + slots[i].len) > phys_addr)
			return i;
	}
	return -1;
}

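/*
 * Illustrative sketch (not called by the library itself): the intended
 * life cycle of a slot entry. "host_ptr" is a placeholder for memory the
 * caller obtained elsewhere:
 *
 *	int s = get_free_slot(kvm);
 *	register_slot(s, 0x100000, 0x200000, 1, (unsigned long)host_ptr);
 *	... get_slot(0x180000) now returns s ...
 *	free_slot(s);
 */
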
/*
 * dirty pages logging control
 */
static int kvm_dirty_pages_log_change(kvm_context_t kvm,
				      unsigned long phys_addr, __u32 flag)
{
	int r;
	int slot;

	slot = get_slot(phys_addr);
	if (slot == -1) {
		fprintf(stderr, "BUG: %s: invalid parameters\n", __FUNCTION__);
		return 1;
	}
#ifdef KVM_CAP_USER_MEMORY
	if (slots[slot].user_alloc) {
		struct kvm_userspace_memory_region mem = {
			.slot = slot,
			.memory_size = slots[slot].len,
			.guest_phys_addr = slots[slot].phys_addr,
			.userspace_addr = slots[slot].userspace_addr,
			.flags = flag,
		};
		r = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
	}
#endif
	if (!slots[slot].user_alloc) {
		struct kvm_memory_region mem = {
			.slot = slot,
			.memory_size = slots[slot].len,
			.guest_phys_addr = slots[slot].phys_addr,
			.flags = flag,
		};
		r = ioctl(kvm->vm_fd, KVM_SET_MEMORY_REGION, &mem);
	}
	if (r == -1)
		fprintf(stderr, "%s: %m\n", __FUNCTION__);
	return r;
}

static int kvm_dirty_pages_log_change_all(kvm_context_t kvm, __u32 flag)
{
	int i, r;

	for (i = r = 0; i < KVM_MAX_NUM_MEM_REGIONS && r == 0; ++i) {
		if (slots[i].len)
			r = kvm_dirty_pages_log_change(kvm, slots[i].phys_addr,
						       flag);
	}
	return r;
}

/*
 * Enable dirty page logging for all memory regions
 */
int kvm_dirty_pages_log_enable_all(kvm_context_t kvm)
{
	if (kvm->dirty_pages_log_all)
		return 0;
	kvm->dirty_pages_log_all = 1;
	return kvm_dirty_pages_log_change_all(kvm, KVM_MEM_LOG_DIRTY_PAGES);
}

/*
 * Enable dirty page logging only for memory regions that were created with
 * dirty logging enabled (disable for all other memory regions).
 */
int kvm_dirty_pages_log_reset(kvm_context_t kvm)
{
	if (!kvm->dirty_pages_log_all)
		return 0;
	kvm->dirty_pages_log_all = 0;
	return kvm_dirty_pages_log_change_all(kvm, 0);
}

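/*
 * Illustrative sketch of a migration-style dirty pass: enable logging
 * everywhere, let the guest run, harvest one bitmap (one bit per page)
 * per region, then reset. "region_phys_addr" and "region_len" are
 * placeholders for the values used when the region was created; the
 * bitmap is sized up to a 64-bit multiple, which the kernel's copy-out
 * assumes:
 *
 *	unsigned long npages = region_len / PAGE_SIZE;
 *	unsigned char *bitmap = calloc(1, (npages + 63) / 64 * 8);
 *
 *	kvm_dirty_pages_log_enable_all(kvm);
 *	... let the vcpus run for a while ...
 *	kvm_get_dirty_pages(kvm, region_phys_addr, bitmap);
 *	kvm_dirty_pages_log_reset(kvm);
 */
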
kvm_context_t kvm_init(struct kvm_callbacks *callbacks,
		       void *opaque)
{
	int fd;
	kvm_context_t kvm;
	int r;

	fd = open("/dev/kvm", O_RDWR);
	if (fd == -1) {
		perror("open /dev/kvm");
		return NULL;
	}
	r = ioctl(fd, KVM_GET_API_VERSION, 0);
	if (r == -1) {
		fprintf(stderr, "kvm kernel version too old: "
			"KVM_GET_API_VERSION ioctl not supported\n");
		goto out_close;
	}
	if (r < EXPECTED_KVM_API_VERSION && r != 10) {
		fprintf(stderr, "kvm kernel version too old: "
			"We expect API version %d or newer, but got "
			"version %d\n",
			EXPECTED_KVM_API_VERSION, r);
		goto out_close;
	}
	if (r > EXPECTED_KVM_API_VERSION) {
		fprintf(stderr, "kvm userspace version too old\n");
		goto out_close;
	}
	kvm_abi = r;
	kvm = malloc(sizeof(*kvm));
	if (!kvm)
		goto out_close;
	kvm->fd = fd;
	kvm->vm_fd = -1;
	kvm->callbacks = callbacks;
	kvm->opaque = opaque;
	kvm->dirty_pages_log_all = 0;
	kvm->no_irqchip_creation = 0;
	memset(&kvm->mem_regions, 0, sizeof(kvm->mem_regions));

	return kvm;
out_close:
	close(fd);
	return NULL;
}

void kvm_finalize(kvm_context_t kvm)
{
	if (kvm->vcpu_fd[0] != -1)
		close(kvm->vcpu_fd[0]);
	if (kvm->vm_fd != -1)
		close(kvm->vm_fd);
	close(kvm->fd);
	free(kvm);
}

void kvm_disable_irqchip_creation(kvm_context_t kvm)
{
	kvm->no_irqchip_creation = 1;
}

int kvm_create_vcpu(kvm_context_t kvm, int slot)
{
	long mmap_size;
	int r;

	r = ioctl(kvm->vm_fd, KVM_CREATE_VCPU, slot);
	if (r == -1) {
		r = -errno;
		fprintf(stderr, "kvm_create_vcpu: %m\n");
		return r;
	}
	kvm->vcpu_fd[slot] = r;
	mmap_size = ioctl(kvm->fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	if (mmap_size == -1) {
		r = -errno;
		fprintf(stderr, "get vcpu mmap size: %m\n");
		return r;
	}
	kvm->run[slot] = mmap(NULL, mmap_size, PROT_READ|PROT_WRITE, MAP_SHARED,
			      kvm->vcpu_fd[slot], 0);
	if (kvm->run[slot] == MAP_FAILED) {
		r = -errno;
		fprintf(stderr, "mmap vcpu area: %m\n");
		return r;
	}
	return 0;
}

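/*
 * Note: the mmap above exposes the kernel's struct kvm_run for this vcpu;
 * after each KVM_RUN ioctl the exit details are read from it. Sketch:
 *
 *	struct kvm_run *run = kvm->run[0];
 *
 *	... KVM_RUN ioctl ...
 *	if (run->exit_reason == KVM_EXIT_IO)
 *		... decode run->io ...
 */
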
int kvm_set_shadow_pages(kvm_context_t kvm, unsigned int nrshadow_pages)
{
#ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
	int r;

	r = ioctl(kvm->fd, KVM_CHECK_EXTENSION,
		  KVM_CAP_MMU_SHADOW_CACHE_CONTROL);
	if (r > 0) {
		r = ioctl(kvm->vm_fd, KVM_SET_NR_MMU_PAGES, nrshadow_pages);
		if (r == -1) {
			fprintf(stderr, "kvm_set_shadow_pages: %m\n");
			return -errno;
		}
		return 0;
	}
#endif
	return -1;
}

int kvm_get_shadow_pages(kvm_context_t kvm, unsigned int *nrshadow_pages)
{
#ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
	int r;

	r = ioctl(kvm->fd, KVM_CHECK_EXTENSION,
		  KVM_CAP_MMU_SHADOW_CACHE_CONTROL);
	if (r > 0) {
		*nrshadow_pages = ioctl(kvm->vm_fd, KVM_GET_NR_MMU_PAGES);
		return 0;
	}
#endif
	return -1;
}

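/*
 * Usage sketch (the doubling is illustrative, not a recommendation):
 *
 *	unsigned int pages;
 *
 *	if (kvm_get_shadow_pages(kvm, &pages) == 0)
 *		kvm_set_shadow_pages(kvm, pages * 2);
 */
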
int kvm_create_vm(kvm_context_t kvm)
{
	int fd = kvm->fd;

	kvm->vcpu_fd[0] = -1;

	fd = ioctl(fd, KVM_CREATE_VM, 0);
	if (fd == -1) {
		fprintf(stderr, "kvm_create_vm: %m\n");
		return -1;
	}
	kvm->vm_fd = fd;
	return 0;
}

static int kvm_create_default_phys_mem(kvm_context_t kvm,
				       unsigned long phys_mem_bytes,
				       void **vm_mem)
{
	unsigned long memory = (phys_mem_bytes + PAGE_SIZE - 1) & PAGE_MASK;
	int r;

#ifdef KVM_CAP_USER_MEMORY
	r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY);
	if (r > 0)
		r = kvm_alloc_userspace_memory(kvm, memory, vm_mem);
	else
#endif
		r = kvm_alloc_kernel_memory(kvm, memory, vm_mem);
	if (r < 0)
		return r;

	r = kvm_arch_create_default_phys_mem(kvm, phys_mem_bytes, vm_mem);
	if (r < 0)
		return r;

	kvm->physical_memory = *vm_mem;
	return 0;
}

void kvm_create_irqchip(kvm_context_t kvm)
{
	int r;

	kvm->irqchip_in_kernel = 0;
#ifdef KVM_CAP_IRQCHIP
	if (!kvm->no_irqchip_creation) {
		r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_IRQCHIP);
		if (r > 0) {	/* kernel irqchip supported */
			r = ioctl(kvm->vm_fd, KVM_CREATE_IRQCHIP);
			if (r >= 0)
				kvm->irqchip_in_kernel = 1;
			else
				fprintf(stderr,
					"Create kernel PIC irqchip failed\n");
		}
	}
#endif
}

int kvm_create(kvm_context_t kvm, unsigned long phys_mem_bytes, void **vm_mem)
{
	int r;

	r = kvm_create_vm(kvm);
	if (r < 0)
		return r;
	r = kvm_arch_create(kvm, phys_mem_bytes, vm_mem);
	if (r < 0)
		return r;
	init_slots();
	r = kvm_create_default_phys_mem(kvm, phys_mem_bytes, vm_mem);
	if (r < 0)
		return r;
	kvm_create_irqchip(kvm);
	r = kvm_create_vcpu(kvm, 0);
	if (r < 0)
		return r;

	return 0;
}

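/*
 * Illustrative bring-up sketch. "my_callbacks" is a hypothetical
 * struct kvm_callbacks instance supplied by the caller and error
 * handling is elided; kvm_create() performs the vm, memory-slot,
 * irqchip and vcpu 0 setup seen above, in that order:
 *
 *	void *vm_mem;
 *	kvm_context_t kvm = kvm_init(&my_callbacks, NULL);
 *
 *	if (kvm && kvm_create(kvm, 128 * 1024 * 1024, &vm_mem) == 0)
 *		kvm_run(kvm, 0);	// returns when an exit is unhandled
 */
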
#ifdef KVM_CAP_USER_MEMORY

void *kvm_create_userspace_phys_mem(kvm_context_t kvm, unsigned long phys_start,
				    unsigned long len, int log, int writable)
{
	int r;
	int prot = PROT_READ;
	void *ptr;
	struct kvm_userspace_memory_region memory = {
		.memory_size = len,
		.guest_phys_addr = phys_start,
		.flags = log ? KVM_MEM_LOG_DIRTY_PAGES : 0,
	};

	if (writable)
		prot |= PROT_WRITE;

	ptr = mmap(NULL, len, prot, MAP_ANONYMOUS | MAP_SHARED, -1, 0);
	if (ptr == MAP_FAILED) {
		fprintf(stderr, "create_userspace_phys_mem: %s\n",
			strerror(errno));
		return NULL;
	}

	memset(ptr, 0, len);

	memory.userspace_addr = (unsigned long)ptr;
	memory.slot = get_free_slot(kvm);
	r = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &memory);
	if (r == -1) {
		fprintf(stderr, "create_userspace_phys_mem: %s\n",
			strerror(errno));
		return NULL;
	}
	register_slot(memory.slot, memory.guest_phys_addr, memory.memory_size,
		      1, memory.userspace_addr);

	return ptr;
}

#endif

void *kvm_create_phys_mem(kvm_context_t kvm, unsigned long phys_start,
			  unsigned long len, int log, int writable)
{
#ifdef KVM_CAP_USER_MEMORY
	int r;

	r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY);
	if (r > 0)
		return kvm_create_userspace_phys_mem(kvm, phys_start, len,
						     log, writable);
	else
#endif
		return kvm_create_kernel_phys_mem(kvm, phys_start, len,
						  log, writable);
}

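/*
 * Illustrative sketch: mapping 16MB of writable guest RAM at guest
 * physical address 0, with dirty logging off. The helper transparently
 * picks the userspace or kernel allocation path probed above:
 *
 *	void *ram = kvm_create_phys_mem(kvm, 0, 16 * 1024 * 1024, 0, 1);
 *	if (!ram)
 *		... bail out ...
 */
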
int kvm_register_userspace_phys_mem(kvm_context_t kvm,
			unsigned long phys_start, void *userspace_addr,
			unsigned long len, int log)
{
#ifdef KVM_CAP_USER_MEMORY
	struct kvm_userspace_memory_region memory = {
		.memory_size = len,
		.guest_phys_addr = phys_start,
		.userspace_addr = (intptr_t)userspace_addr,
		.flags = log ? KVM_MEM_LOG_DIRTY_PAGES : 0,
	};
	int r;

	if (!kvm->physical_memory)
		kvm->physical_memory = userspace_addr - phys_start;

	memory.slot = get_free_slot(kvm);
	r = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &memory);
	if (r == -1) {
		fprintf(stderr, "kvm_register_userspace_phys_mem: %s\n",
			strerror(errno));
		return -1;
	}
	register_slot(memory.slot, memory.guest_phys_addr, memory.memory_size,
		      1, memory.userspace_addr);
	return 0;
#else
	return -ENOSYS;
#endif
}

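/*
 * Illustrative sketch: registering caller-owned memory instead of having
 * the library mmap it. "len" and "phys_start" are placeholders chosen by
 * the caller:
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_ANONYMOUS | MAP_SHARED, -1, 0);
 *
 *	if (p != MAP_FAILED)
 *		kvm_register_userspace_phys_mem(kvm, phys_start, p, len, 0);
 */
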
/*
 * Destroy/free a whole slot.
 * phys_start and len are the params that were passed to kvm_create_phys_mem().
 */
void kvm_destroy_phys_mem(kvm_context_t kvm, unsigned long phys_start,
			  unsigned long len)
{
	int slot;
	struct kvm_memory_region *mem;

	slot = get_slot(phys_start);

	if (slot < 0 || slot >= KVM_MAX_NUM_MEM_REGIONS) {
		fprintf(stderr, "BUG: %s: invalid parameters (slot=%d)\n",
			__FUNCTION__, slot);
		return;
	}
	mem = &kvm->mem_regions[slot];
	if (phys_start != mem->guest_phys_addr) {
		fprintf(stderr,
			"WARNING: %s: phys_start is 0x%lx expecting 0x%llx\n",
			__FUNCTION__, phys_start, mem->guest_phys_addr);
		phys_start = mem->guest_phys_addr;
	}
	/* a zero-length region deletes the slot */
	kvm_create_phys_mem(kvm, phys_start, 0, 0, 0);
}

static int kvm_get_map(kvm_context_t kvm, int ioctl_num, int slot, void *buf)
{
	int r;
	struct kvm_dirty_log log = {
		.slot = slot,
	};

	log.dirty_bitmap = buf;

	r = ioctl(kvm->vm_fd, ioctl_num, &log);
	if (r == -1)
		return -errno;
	return 0;
}

int kvm_get_dirty_pages(kvm_context_t kvm, unsigned long phys_addr, void *buf)
{
	int slot;

	slot = get_slot(phys_addr);
	return kvm_get_map(kvm, KVM_GET_DIRTY_LOG, slot, buf);
}

int kvm_get_mem_map(kvm_context_t kvm, unsigned long phys_addr, void *buf)
{
	int slot;

	slot = get_slot(phys_addr);
#ifdef KVM_GET_MEM_MAP
	return kvm_get_map(kvm, KVM_GET_MEM_MAP, slot, buf);
#else /* not KVM_GET_MEM_MAP ==> fake it: all pages exist */
	unsigned long i, n, m, npages;
	unsigned char v;

	if (slot < 0 || slot >= KVM_MAX_NUM_MEM_REGIONS) {
		errno = EINVAL;
		return -1;
	}
	npages = kvm->mem_regions[slot].memory_size / PAGE_SIZE;
	n = npages / 8;
	m = npages % 8;
	memset(buf, 0xff, n); /* all pages exist */
	v = 0;
	for (i = 0; i < m; ++i) /* last byte may not be "aligned" */
		v |= 1 << (7 - i);
	if (v)
		*(unsigned char *)(buf + n) = v;
	return 0;
#endif /* KVM_GET_MEM_MAP */
}

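/*
 * Worked example for the fallback above: a 13-page region gives
 * npages = 13, n = 1, m = 5, so buf[0] = 0xff marks pages 0-7 and the
 * loop sets the top five bits of the last byte, buf[1] = 0xf8, marking
 * pages 8-12.
 */
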
#ifdef KVM_CAP_IRQCHIP

int kvm_set_irq_level(kvm_context_t kvm, int irq, int level)
{
	struct kvm_irq_level event;
	int r;

	if (!kvm->irqchip_in_kernel)
		return 0;
	event.level = level;
	event.irq = irq;
	r = ioctl(kvm->vm_fd, KVM_IRQ_LINE, &event);
	if (r == -1)
		perror("kvm_set_irq_level");
	return 1;
}

int kvm_get_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip)
{
	int r;

	if (!kvm->irqchip_in_kernel)
		return 0;
	r = ioctl(kvm->vm_fd, KVM_GET_IRQCHIP, chip);
	if (r == -1) {
		r = -errno;
		perror("kvm_get_irqchip");
	}
	return r;
}

int kvm_set_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip)
{
	int r;

	if (!kvm->irqchip_in_kernel)
		return 0;
	r = ioctl(kvm->vm_fd, KVM_SET_IRQCHIP, chip);
	if (r == -1) {
		r = -errno;
		perror("kvm_set_irqchip");
	}
	return r;
}

#endif

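/*
 * Illustrative sketch: with an in-kernel irqchip, a device model
 * edge-triggers guest IRQ 4 by raising and then lowering the line:
 *
 *	kvm_set_irq_level(kvm, 4, 1);
 *	kvm_set_irq_level(kvm, 4, 0);
 */
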
static int handle_io(kvm_context_t kvm, struct kvm_run *run, int vcpu)
{
	uint16_t addr = run->io.port;
	int r;
	int i;
	void *p = (void *)run + run->io.data_offset;

	for (i = 0; i < run->io.count; ++i) {
		switch (run->io.direction) {
		case KVM_EXIT_IO_IN:
			switch (run->io.size) {
			case 1:
				r = kvm->callbacks->inb(kvm->opaque, addr, p);
				break;
			case 2:
				r = kvm->callbacks->inw(kvm->opaque, addr, p);
				break;
			case 4:
				r = kvm->callbacks->inl(kvm->opaque, addr, p);
				break;
			default:
				fprintf(stderr, "bad I/O size %d\n",
					run->io.size);
				return -EMSGSIZE;
			}
			break;
		case KVM_EXIT_IO_OUT:
			switch (run->io.size) {
			case 1:
				r = kvm->callbacks->outb(kvm->opaque, addr,
							 *(uint8_t *)p);
				break;
			case 2:
				r = kvm->callbacks->outw(kvm->opaque, addr,
							 *(uint16_t *)p);
				break;
			case 4:
				r = kvm->callbacks->outl(kvm->opaque, addr,
							 *(uint32_t *)p);
				break;
			default:
				fprintf(stderr, "bad I/O size %d\n",
					run->io.size);
				return -EMSGSIZE;
			}
			break;
		default:
			fprintf(stderr, "bad I/O direction %d\n",
				run->io.direction);
			return -EPROTO;
		}

		p += run->io.size;
	}

	return 0;
}

int handle_debug(kvm_context_t kvm, int vcpu)
{
	return kvm->callbacks->debug(kvm->opaque, vcpu);
}

int kvm_get_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs)
{
	return ioctl(kvm->vcpu_fd[vcpu], KVM_GET_REGS, regs);
}

int kvm_set_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs)
{
	return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_REGS, regs);
}

int kvm_get_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu)
{
	return ioctl(kvm->vcpu_fd[vcpu], KVM_GET_FPU, fpu);
}

int kvm_set_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu)
{
	return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_FPU, fpu);
}

int kvm_get_sregs(kvm_context_t kvm, int vcpu, struct kvm_sregs *sregs)
{
	return ioctl(kvm->vcpu_fd[vcpu], KVM_GET_SREGS, sregs);
}

int kvm_set_sregs(kvm_context_t kvm, int vcpu, struct kvm_sregs *sregs)
{
	return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_SREGS, sregs);
}

static int handle_mmio(kvm_context_t kvm, struct kvm_run *kvm_run)
{
	unsigned long addr = kvm_run->mmio.phys_addr;
	void *data = kvm_run->mmio.data;
	int r = -1;

	/* hack: Red Hat 7.1 generates these weird accesses. */
	if (addr == 0xa0000 && kvm_run->mmio.len == 3)
		return 0;

	if (kvm_run->mmio.is_write) {
		switch (kvm_run->mmio.len) {
		case 1:
			r = kvm->callbacks->writeb(kvm->opaque, addr,
						   *(uint8_t *)data);
			break;
		case 2:
			r = kvm->callbacks->writew(kvm->opaque, addr,
						   *(uint16_t *)data);
			break;
		case 4:
			r = kvm->callbacks->writel(kvm->opaque, addr,
						   *(uint32_t *)data);
			break;
		case 8:
			r = kvm->callbacks->writeq(kvm->opaque, addr,
						   *(uint64_t *)data);
			break;
		}
	} else {
		switch (kvm_run->mmio.len) {
		case 1:
			r = kvm->callbacks->readb(kvm->opaque, addr,
						  (uint8_t *)data);
			break;
		case 2:
			r = kvm->callbacks->readw(kvm->opaque, addr,
						  (uint16_t *)data);
			break;
		case 4:
			r = kvm->callbacks->readl(kvm->opaque, addr,
						  (uint32_t *)data);
			break;
		case 8:
			r = kvm->callbacks->readq(kvm->opaque, addr,
						  (uint64_t *)data);
			break;
		}
	}
	return r;
}

int handle_io_window(kvm_context_t kvm)
{
	return kvm->callbacks->io_window(kvm->opaque);
}

int handle_halt(kvm_context_t kvm, int vcpu)
{
	return kvm->callbacks->halt(kvm->opaque, vcpu);
}

int handle_shutdown(kvm_context_t kvm, int vcpu)
{
	return kvm->callbacks->shutdown(kvm->opaque, vcpu);
}

int try_push_interrupts(kvm_context_t kvm)
{
	return kvm->callbacks->try_push_interrupts(kvm->opaque);
}

void post_kvm_run(kvm_context_t kvm, int vcpu)
{
	kvm->callbacks->post_kvm_run(kvm->opaque, vcpu);
}

int pre_kvm_run(kvm_context_t kvm, int vcpu)
{
	return kvm->callbacks->pre_kvm_run(kvm->opaque, vcpu);
}

int kvm_get_interrupt_flag(kvm_context_t kvm, int vcpu)
{
	struct kvm_run *run = kvm->run[vcpu];

	if (kvm_abi == 10)
		return ((struct kvm_run_abi10 *)run)->if_flag;
	return run->if_flag;
}

int kvm_is_ready_for_interrupt_injection(kvm_context_t kvm, int vcpu)
{
	struct kvm_run *run = kvm->run[vcpu];

	if (kvm_abi == 10)
		return ((struct kvm_run_abi10 *)run)->ready_for_interrupt_injection;
	return run->ready_for_interrupt_injection;
}

int kvm_run(kvm_context_t kvm, int vcpu)
{
	int r;
	int fd = kvm->vcpu_fd[vcpu];
	struct kvm_run *run = kvm->run[vcpu];

	if (kvm_abi == 10)
		return kvm_run_abi10(kvm, vcpu);

again:
	if (!kvm->irqchip_in_kernel)
		run->request_interrupt_window = try_push_interrupts(kvm);
	r = pre_kvm_run(kvm, vcpu);
	if (r)
		return r;
	r = ioctl(fd, KVM_RUN, 0);
	post_kvm_run(kvm, vcpu);

	if (r == -1 && errno != EINTR && errno != EAGAIN) {
		r = -errno;
		fprintf(stderr, "kvm_run: %m\n");
		return r;
	}
	if (r == -1) {
		r = handle_io_window(kvm);
		goto more;
	}
	switch (run->exit_reason) {
	case KVM_EXIT_UNKNOWN:
		fprintf(stderr, "unhandled vm exit: 0x%x vcpu_id %d\n",
			(unsigned)run->hw.hardware_exit_reason, vcpu);
		kvm_show_regs(kvm, vcpu);
		abort();
		break;
	case KVM_EXIT_FAIL_ENTRY:
		fprintf(stderr, "kvm_run: failed entry, reason %u\n",
			(unsigned)run->fail_entry.hardware_entry_failure_reason & 0xffff);
		return -ENOEXEC;
	case KVM_EXIT_EXCEPTION:
		fprintf(stderr, "exception %d (%x)\n",
			run->ex.exception, run->ex.error_code);
		kvm_show_regs(kvm, vcpu);
		kvm_show_code(kvm, vcpu);
		abort();
		break;
	case KVM_EXIT_IO:
		r = handle_io(kvm, run, vcpu);
		break;
	case KVM_EXIT_DEBUG:
		r = handle_debug(kvm, vcpu);
		break;
	case KVM_EXIT_MMIO:
		r = handle_mmio(kvm, run);
		break;
	case KVM_EXIT_HLT:
		r = handle_halt(kvm, vcpu);
		break;
	case KVM_EXIT_IRQ_WINDOW_OPEN:
		break;
	case KVM_EXIT_SHUTDOWN:
		r = handle_shutdown(kvm, vcpu);
		break;
#ifdef KVM_EXIT_SET_TPR
	case KVM_EXIT_SET_TPR:
		break;
#endif
	default:
		fprintf(stderr, "unhandled vm exit: 0x%x\n", run->exit_reason);
		kvm_show_regs(kvm, vcpu);
		abort();
		break;
	}
more:
	if (!r)
		goto again;
	return r;
}

int kvm_inject_irq(kvm_context_t kvm, int vcpu, unsigned irq)
{
	struct kvm_interrupt intr;

	intr.irq = irq;
	return ioctl(kvm->vcpu_fd[vcpu], KVM_INTERRUPT, &intr);
}

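/*
 * Illustrative sketch: without an in-kernel irqchip, a caller's
 * try_push_interrupts() callback would typically inject a pending vector
 * only when the guest can accept it. "vector" is a placeholder:
 *
 *	if (kvm_is_ready_for_interrupt_injection(kvm, vcpu) &&
 *	    kvm_get_interrupt_flag(kvm, vcpu))
 *		kvm_inject_irq(kvm, vcpu, vector);
 */
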
int kvm_guest_debug(kvm_context_t kvm, int vcpu, struct kvm_debug_guest *dbg)
{
	return ioctl(kvm->vcpu_fd[vcpu], KVM_DEBUG_GUEST, dbg);
}

int kvm_set_signal_mask(kvm_context_t kvm, int vcpu, const sigset_t *sigset)
{
	struct kvm_signal_mask *sigmask;
	int r;

	if (!sigset) {
		r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_SIGNAL_MASK, NULL);
		if (r == -1)
			r = -errno;
		return r;
	}
	sigmask = malloc(sizeof(*sigmask) + sizeof(*sigset));
	if (!sigmask)
		return -ENOMEM;

	sigmask->len = 8;
	memcpy(sigmask->sigset, sigset, sizeof(*sigset));
	r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_SIGNAL_MASK, sigmask);
	if (r == -1)
		r = -errno;
	free(sigmask);
	return r;
}

int kvm_irqchip_in_kernel(kvm_context_t kvm)
{
	return kvm->irqchip_in_kernel;
}