/*
 * Kernel-based Virtual Machine control library
 *
 * This library provides an API to control the kvm hardware virtualization
 * module.
 *
 * Copyright (C) 2006 Qumranet
 *
 * Authors:
 *	Avi Kivity   <avi@qumranet.com>
 *	Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the GNU LGPL license, version 2.
 */
#include <unistd.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#include "kvmctl.h"		/* kvm_context_t, struct kvm_callbacks, MAX_VCPUS */
#include "kvm-abi-10.h"

#define EXPECTED_KVM_API_VERSION 12

#if EXPECTED_KVM_API_VERSION != KVM_API_VERSION
#error libkvm: userspace and kernel version mismatch
#endif

static int kvm_abi = EXPECTED_KVM_API_VERSION;

#define PAGE_SIZE 4096ul

/* FIXME: share this number with kvm */
/* FIXME: or dynamically alloc/realloc regions */
#define KVM_MAX_NUM_MEM_REGIONS 4u
/**
 * \brief The KVM context
 *
 * The verbose KVM context
 */
struct kvm_context {
	/// Filedescriptor to /dev/kvm
	int fd;
	int vm_fd;
	int vcpu_fd[MAX_VCPUS];
	struct kvm_run *run[MAX_VCPUS];
	/// Callbacks that KVM uses to emulate various unvirtualizable functionality
	struct kvm_callbacks *callbacks;
	void *opaque;
	/// A pointer to the memory used as the physical memory for the guest
	void *physical_memory;
	/// is dirty pages logging enabled for all regions or not
	int dirty_pages_log_all;
	/// memory regions parameters
	struct kvm_memory_region mem_regions[KVM_MAX_NUM_MEM_REGIONS];
};
/*
 * memory regions parameters
 */
static void kvm_memory_region_save_params(kvm_context_t kvm,
					  struct kvm_memory_region *mem)
{
	if (!mem || (mem->slot >= KVM_MAX_NUM_MEM_REGIONS)) {
		fprintf(stderr, "BUG: %s: invalid parameters\n", __FUNCTION__);
		return;
	}
	kvm->mem_regions[mem->slot] = *mem;
}
static void kvm_memory_region_clear_params(kvm_context_t kvm, int regnum)
{
	if (regnum >= KVM_MAX_NUM_MEM_REGIONS) {
		fprintf(stderr, "BUG: %s: invalid parameters\n", __FUNCTION__);
		return;
	}
	kvm->mem_regions[regnum].memory_size = 0;
}
/*
 * dirty pages logging control
 */
static int kvm_dirty_pages_log_change(kvm_context_t kvm, int regnum,
				      __u32 flag)
{
	int r;
	struct kvm_memory_region *mem;

	if (regnum >= KVM_MAX_NUM_MEM_REGIONS) {
		fprintf(stderr, "BUG: %s: invalid parameters\n", __FUNCTION__);
		return -EINVAL;
	}
	mem = &kvm->mem_regions[regnum];
	if (mem->memory_size == 0)	/* not used */
		return 0;
	if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES)	/* log already enabled */
		return 0;
	mem->flags |= flag;		/* temporary turn on flag */
	r = ioctl(kvm->vm_fd, KVM_SET_MEMORY_REGION, mem);
	mem->flags &= ~flag;		/* back to previous value */
	if (r == -1)
		fprintf(stderr, "%s: %m\n", __FUNCTION__);
	return r;
}
static int kvm_dirty_pages_log_change_all(kvm_context_t kvm, __u32 flag)
{
	int i, r;

	for (i = r = 0; i < KVM_MAX_NUM_MEM_REGIONS && r == 0; i++)
		r = kvm_dirty_pages_log_change(kvm, i, flag);
	return r;
}
/**
 * Enable dirty page logging for all memory regions
 */
int kvm_dirty_pages_log_enable_all(kvm_context_t kvm)
{
	if (kvm->dirty_pages_log_all)
		return 0;
	kvm->dirty_pages_log_all = 1;
	return kvm_dirty_pages_log_change_all(kvm, KVM_MEM_LOG_DIRTY_PAGES);
}
/**
 * Enable dirty page logging only for memory regions that were created with
 * dirty logging enabled (disable for all other memory regions).
 */
int kvm_dirty_pages_log_reset(kvm_context_t kvm)
{
	if (!kvm->dirty_pages_log_all)
		return 0;
	kvm->dirty_pages_log_all = 0;
	return kvm_dirty_pages_log_change_all(kvm, 0);
}
kvm_context_t kvm_init(struct kvm_callbacks *callbacks,
		       void *opaque)
{
	int fd;
	kvm_context_t kvm;
	int r;

	fd = open("/dev/kvm", O_RDWR);
	if (fd == -1) {
		perror("open /dev/kvm");
		return NULL;
	}
	r = ioctl(fd, KVM_GET_API_VERSION, 0);
	if (r == -1) {
		fprintf(stderr, "kvm kernel version too old: "
			"KVM_GET_API_VERSION ioctl not supported\n");
		goto out_close;
	}
	if (r < EXPECTED_KVM_API_VERSION && r != 10) {
		fprintf(stderr, "kvm kernel version too old: "
			"We expect API version %d or newer, but got "
			"version %d\n",
			EXPECTED_KVM_API_VERSION, r);
		goto out_close;
	}
	if (r > EXPECTED_KVM_API_VERSION) {
		fprintf(stderr, "kvm userspace version too old\n");
		goto out_close;
	}
	kvm_abi = r;
	kvm = malloc(sizeof(*kvm));
	if (!kvm)
		goto out_close;
	kvm->fd = fd;
	kvm->vm_fd = -1;
	kvm->callbacks = callbacks;
	kvm->opaque = opaque;
	kvm->dirty_pages_log_all = 0;
	memset(&kvm->mem_regions, 0, sizeof(kvm->mem_regions));
	return kvm;

out_close:
	close(fd);
	return NULL;
}
void kvm_finalize(kvm_context_t kvm)
{
	if (kvm->vcpu_fd[0] != -1)
		close(kvm->vcpu_fd[0]);
	if (kvm->vm_fd != -1)
		close(kvm->vm_fd);
	close(kvm->fd);
	free(kvm);
}
int kvm_create_vcpu(kvm_context_t kvm, int slot)
{
	long mmap_size;
	int r;

	r = ioctl(kvm->vm_fd, KVM_CREATE_VCPU, slot);
	if (r == -1) {
		r = -errno;
		fprintf(stderr, "kvm_create_vcpu: %m\n");
		return r;
	}
	kvm->vcpu_fd[slot] = r;
	mmap_size = ioctl(kvm->fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	if (mmap_size == -1) {
		r = -errno;
		fprintf(stderr, "get vcpu mmap size: %m\n");
		return r;
	}
	kvm->run[slot] = mmap(0, mmap_size, PROT_READ|PROT_WRITE, MAP_SHARED,
			      kvm->vcpu_fd[slot], 0);
	if (kvm->run[slot] == MAP_FAILED) {
		r = -errno;
		fprintf(stderr, "mmap vcpu area: %m\n");
		return r;
	}
	return 0;
}
int kvm_create(kvm_context_t kvm, unsigned long memory, void **vm_mem)
{
	unsigned long dosmem = 0xa0000;
	unsigned long exmem = 0xc0000;
	int fd = kvm->fd;
	int zfd;
	int r;
	struct kvm_memory_region low_memory = {
		.slot = 3,
		.memory_size = memory < dosmem ? memory : dosmem,
		.guest_phys_addr = 0,
	};
	struct kvm_memory_region extended_memory = {
		.slot = 0,
		.memory_size = memory < exmem ? 0 : memory - exmem,
		.guest_phys_addr = exmem,
	};

	kvm->vcpu_fd[0] = -1;

	fd = ioctl(fd, KVM_CREATE_VM, 0);
	if (fd == -1) {
		fprintf(stderr, "kvm_create_vm: %m\n");
		return -1;
	}
	kvm->vm_fd = fd;

	/* 640K should be enough. */
	r = ioctl(fd, KVM_SET_MEMORY_REGION, &low_memory);
	if (r == -1) {
		fprintf(stderr, "kvm_create_memory_region: %m\n");
		return -1;
	}
	if (extended_memory.memory_size) {
		r = ioctl(fd, KVM_SET_MEMORY_REGION, &extended_memory);
		if (r == -1) {
			fprintf(stderr, "kvm_create_memory_region: %m\n");
			return -1;
		}
	}

	kvm_memory_region_save_params(kvm, &low_memory);
	kvm_memory_region_save_params(kvm, &extended_memory);

	*vm_mem = mmap(0, memory, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	if (*vm_mem == MAP_FAILED) {
		fprintf(stderr, "mmap: %m\n");
		return -1;
	}
	kvm->physical_memory = *vm_mem;

	/* overlay a private zero mapping over 0xa8000-0xaffff */
	zfd = open("/dev/zero", O_RDONLY);
	mmap(*vm_mem + 0xa8000, 0x8000, PROT_READ|PROT_WRITE,
	     MAP_PRIVATE|MAP_FIXED, zfd, 0);
	close(zfd);

	r = kvm_create_vcpu(kvm, 0);
	if (r < 0)
		return r;

	return 0;
}
void *kvm_create_phys_mem(kvm_context_t kvm, unsigned long phys_start,
			  unsigned long len, int slot, int log, int writable)
{
	void *ptr;
	int r;
	int fd = kvm->vm_fd;
	int prot = PROT_READ;
	struct kvm_memory_region memory = {
		.slot = slot,
		.memory_size = len,
		.guest_phys_addr = phys_start,
		.flags = log ? KVM_MEM_LOG_DIRTY_PAGES : 0,
	};

	r = ioctl(fd, KVM_SET_MEMORY_REGION, &memory);
	if (r == -1)
		return 0;

	kvm_memory_region_save_params(kvm, &memory);

	if (writable)
		prot |= PROT_WRITE;

	ptr = mmap(0, len, prot, MAP_SHARED, fd, phys_start);
	if (ptr == MAP_FAILED)
		return 0;
	return ptr;
}
void kvm_destroy_phys_mem(kvm_context_t kvm, unsigned long phys_start,
			  unsigned long len)
{
	//for each memory region in (phys_start, phys_start+len) do
	//	kvm_memory_region_clear_params(kvm, region);
	kvm_memory_region_clear_params(kvm, 0); /* avoid compiler warning */
	printf("kvm_destroy_phys_mem: implement me\n");
	exit(1);
}
int kvm_create_memory_alias(kvm_context_t kvm,
			    int slot,
			    uint64_t phys_start,
			    uint64_t len,
			    uint64_t target_phys)
{
	struct kvm_memory_alias alias = {
		.slot = slot,
		.flags = 0,
		.guest_phys_addr = phys_start,
		.memory_size = len,
		.target_phys_addr = target_phys,
	};
	int fd = kvm->vm_fd;
	int r;

	r = ioctl(fd, KVM_SET_MEMORY_ALIAS, &alias);
	if (r == -1)
		return -errno;

	return 0;
}

int kvm_destroy_memory_alias(kvm_context_t kvm, int slot)
{
	return kvm_create_memory_alias(kvm, slot, 0, 0, 0);
}
static int kvm_get_map(kvm_context_t kvm, int ioctl_num, int slot, void *buf)
{
	int r;
	struct kvm_dirty_log log = {
		.slot = slot,
	};

	log.dirty_bitmap = buf;

	r = ioctl(kvm->vm_fd, ioctl_num, &log);
	if (r == -1)
		return -errno;
	return 0;
}
int kvm_get_dirty_pages(kvm_context_t kvm, int slot, void *buf)
{
	return kvm_get_map(kvm, KVM_GET_DIRTY_LOG, slot, buf);
}
int kvm_get_mem_map(kvm_context_t kvm, int slot, void *buf)
{
#ifdef KVM_GET_MEM_MAP
	return kvm_get_map(kvm, KVM_GET_MEM_MAP, slot, buf);
#else /* not KVM_GET_MEM_MAP ==> fake it: all pages exist */
	unsigned long i, n, m, npages;
	unsigned char v;

	if (slot >= KVM_MAX_NUM_MEM_REGIONS) {
		errno = EINVAL;
		return -1;
	}
	npages = kvm->mem_regions[slot].memory_size / PAGE_SIZE;
	n = npages / 8;
	m = npages % 8;
	memset(buf, 0xff, n); /* all pages exist */
	v = 0;
	for (i = 0; i <= m; i++) /* last byte may not be "aligned" */
		v |= 1 << (7 - i);
	*(unsigned char *)(buf + n) = v;
	return 0;
#endif /* KVM_GET_MEM_MAP */
}
static int handle_io_abi10(kvm_context_t kvm, struct kvm_run_abi10 *run,
			   int vcpu)
{
	uint16_t addr = run->io.port;
	int r;
	int i;
	void *p = (void *)run + run->io.data_offset;

	for (i = 0; i < run->io.count; ++i) {
		switch (run->io.direction) {
		case KVM_EXIT_IO_IN:
			switch (run->io.size) {
			case 1:
				r = kvm->callbacks->inb(kvm->opaque, addr, p);
				break;
			case 2:
				r = kvm->callbacks->inw(kvm->opaque, addr, p);
				break;
			case 4:
				r = kvm->callbacks->inl(kvm->opaque, addr, p);
				break;
			default:
				fprintf(stderr, "bad I/O size %d\n",
					run->io.size);
				return -EMSGSIZE;
			}
			break;
		case KVM_EXIT_IO_OUT:
			switch (run->io.size) {
			case 1:
				r = kvm->callbacks->outb(kvm->opaque, addr,
							 *(uint8_t *)p);
				break;
			case 2:
				r = kvm->callbacks->outw(kvm->opaque, addr,
							 *(uint16_t *)p);
				break;
			case 4:
				r = kvm->callbacks->outl(kvm->opaque, addr,
							 *(uint32_t *)p);
				break;
			default:
				fprintf(stderr, "bad I/O size %d\n",
					run->io.size);
				return -EMSGSIZE;
			}
			break;
		default:
			fprintf(stderr, "bad I/O direction %d\n",
				run->io.direction);
			return -EPROTO;
		}

		p += run->io.size;
	}
	run->io_completed = 1;

	return 0;
}
static int handle_io(kvm_context_t kvm, struct kvm_run *run, int vcpu)
{
	uint16_t addr = run->io.port;
	int r;
	int i;
	void *p = (void *)run + run->io.data_offset;

	for (i = 0; i < run->io.count; ++i) {
		switch (run->io.direction) {
		case KVM_EXIT_IO_IN:
			switch (run->io.size) {
			case 1:
				r = kvm->callbacks->inb(kvm->opaque, addr, p);
				break;
			case 2:
				r = kvm->callbacks->inw(kvm->opaque, addr, p);
				break;
			case 4:
				r = kvm->callbacks->inl(kvm->opaque, addr, p);
				break;
			default:
				fprintf(stderr, "bad I/O size %d\n",
					run->io.size);
				return -EMSGSIZE;
			}
			break;
		case KVM_EXIT_IO_OUT:
			switch (run->io.size) {
			case 1:
				r = kvm->callbacks->outb(kvm->opaque, addr,
							 *(uint8_t *)p);
				break;
			case 2:
				r = kvm->callbacks->outw(kvm->opaque, addr,
							 *(uint16_t *)p);
				break;
			case 4:
				r = kvm->callbacks->outl(kvm->opaque, addr,
							 *(uint32_t *)p);
				break;
			default:
				fprintf(stderr, "bad I/O size %d\n",
					run->io.size);
				return -EMSGSIZE;
			}
			break;
		default:
			fprintf(stderr, "bad I/O direction %d\n",
				run->io.direction);
			return -EPROTO;
		}

		p += run->io.size;
	}

	return 0;
}
static int handle_debug(kvm_context_t kvm, int vcpu)
{
	return kvm->callbacks->debug(kvm->opaque, vcpu);
}
int kvm_get_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs)
{
	return ioctl(kvm->vcpu_fd[vcpu], KVM_GET_REGS, regs);
}

int kvm_set_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs)
{
	return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_REGS, regs);
}

int kvm_get_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu)
{
	return ioctl(kvm->vcpu_fd[vcpu], KVM_GET_FPU, fpu);
}

int kvm_set_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu)
{
	return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_FPU, fpu);
}

int kvm_get_sregs(kvm_context_t kvm, int vcpu, struct kvm_sregs *sregs)
{
	return ioctl(kvm->vcpu_fd[vcpu], KVM_GET_SREGS, sregs);
}

int kvm_set_sregs(kvm_context_t kvm, int vcpu, struct kvm_sregs *sregs)
{
	return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_SREGS, sregs);
}
/*
 * Returns available msr list.  User must free.
 */
struct kvm_msr_list *kvm_get_msr_list(kvm_context_t kvm)
{
	struct kvm_msr_list sizer, *msrs;
	int r, e;

	sizer.nmsrs = 0;
	r = ioctl(kvm->fd, KVM_GET_MSR_INDEX_LIST, &sizer);
	if (r == -1 && errno != E2BIG)
		return NULL;
	msrs = malloc(sizeof *msrs + sizer.nmsrs * sizeof *msrs->indices);
	if (!msrs) {
		errno = ENOMEM;
		return NULL;
	}
	msrs->nmsrs = sizer.nmsrs;
	r = ioctl(kvm->fd, KVM_GET_MSR_INDEX_LIST, msrs);
	if (r == -1) {
		e = errno;
		free(msrs);
		errno = e;
		return NULL;
	}
	return msrs;
}
int kvm_get_msrs(kvm_context_t kvm, int vcpu, struct kvm_msr_entry *msrs,
		 int n)
{
	struct kvm_msrs *kmsrs = malloc(sizeof *kmsrs + n * sizeof *msrs);
	int r, e;

	if (!kmsrs) {
		errno = ENOMEM;
		return -1;
	}
	kmsrs->nmsrs = n;
	memcpy(kmsrs->entries, msrs, n * sizeof *msrs);
	r = ioctl(kvm->vcpu_fd[vcpu], KVM_GET_MSRS, kmsrs);
	e = errno;
	memcpy(msrs, kmsrs->entries, n * sizeof *msrs);
	free(kmsrs);
	errno = e;
	return r;
}
int kvm_set_msrs(kvm_context_t kvm, int vcpu, struct kvm_msr_entry *msrs,
		 int n)
{
	struct kvm_msrs *kmsrs = malloc(sizeof *kmsrs + n * sizeof *msrs);
	int r, e;

	if (!kmsrs) {
		errno = ENOMEM;
		return -1;
	}
	kmsrs->nmsrs = n;
	memcpy(kmsrs->entries, msrs, n * sizeof *msrs);
	r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_MSRS, kmsrs);
	e = errno;
	free(kmsrs);
	errno = e;
	return r;
}
static void print_seg(FILE *file, const char *name, struct kvm_segment *seg)
{
	fprintf(file,
		"%s %04x (%08llx/%08x p %d dpl %d db %d s %d type %x l %d"
		" g %d avl %d)\n",
		name, seg->selector, seg->base, seg->limit, seg->present,
		seg->dpl, seg->db, seg->s, seg->type, seg->l, seg->g,
		seg->avl);
}
static void print_dt(FILE *file, const char *name, struct kvm_dtable *dt)
{
	fprintf(file, "%s %llx/%x\n", name, dt->base, dt->limit);
}
void kvm_show_regs(kvm_context_t kvm, int vcpu)
{
	int fd = kvm->vcpu_fd[vcpu];
	struct kvm_regs regs;
	struct kvm_sregs sregs;
	int r;

	r = ioctl(fd, KVM_GET_REGS, &regs);
	if (r == -1) {
		perror("KVM_GET_REGS");
		return;
	}
	fprintf(stderr,
		"rax %016llx rbx %016llx rcx %016llx rdx %016llx\n"
		"rsi %016llx rdi %016llx rsp %016llx rbp %016llx\n"
		"r8 %016llx r9 %016llx r10 %016llx r11 %016llx\n"
		"r12 %016llx r13 %016llx r14 %016llx r15 %016llx\n"
		"rip %016llx rflags %08llx\n",
		regs.rax, regs.rbx, regs.rcx, regs.rdx,
		regs.rsi, regs.rdi, regs.rsp, regs.rbp,
		regs.r8, regs.r9, regs.r10, regs.r11,
		regs.r12, regs.r13, regs.r14, regs.r15,
		regs.rip, regs.rflags);
	r = ioctl(fd, KVM_GET_SREGS, &sregs);
	if (r == -1) {
		perror("KVM_GET_SREGS");
		return;
	}
	print_seg(stderr, "cs", &sregs.cs);
	print_seg(stderr, "ds", &sregs.ds);
	print_seg(stderr, "es", &sregs.es);
	print_seg(stderr, "ss", &sregs.ss);
	print_seg(stderr, "fs", &sregs.fs);
	print_seg(stderr, "gs", &sregs.gs);
	print_seg(stderr, "tr", &sregs.tr);
	print_seg(stderr, "ldt", &sregs.ldt);
	print_dt(stderr, "gdt", &sregs.gdt);
	print_dt(stderr, "idt", &sregs.idt);
	fprintf(stderr, "cr0 %llx cr2 %llx cr3 %llx cr4 %llx cr8 %llx"
		" efer %llx\n",
		sregs.cr0, sregs.cr2, sregs.cr3, sregs.cr4, sregs.cr8,
		sregs.efer);
}
static void kvm_show_code(kvm_context_t kvm, int vcpu)
{
#define CR0_PE_MASK (1ULL<<0)
	int fd = kvm->vcpu_fd[vcpu];
	struct kvm_regs regs;
	struct kvm_sregs sregs;
	unsigned long rip;
	unsigned char code[30];
	char code_str[sizeof(code) * 3 + 1];
	int r;

	r = ioctl(fd, KVM_GET_SREGS, &sregs);
	if (r == -1) {
		perror("KVM_GET_SREGS");
		return;
	}
	if (sregs.cr0 & CR0_PE_MASK)
		return;		/* code dump is only done for real mode */

	r = ioctl(fd, KVM_GET_REGS, &regs);
	if (r == -1) {
		perror("KVM_GET_REGS");
		return;
	}
	rip = sregs.cs.base * 16 + regs.rip;
	memcpy(code, kvm->physical_memory + rip, sizeof code);
	*code_str = 0;
	for (r = 0; r < sizeof code; ++r)
		sprintf(code_str + strlen(code_str), " %02x", code[r]);
	fprintf(stderr, "code:%s\n", code_str);
}
static int handle_mmio_abi10(kvm_context_t kvm, struct kvm_run_abi10 *kvm_run)
{
	unsigned long addr = kvm_run->mmio.phys_addr;
	void *data = kvm_run->mmio.data;
	int r = -1;

	if (kvm_run->mmio.is_write) {
		switch (kvm_run->mmio.len) {
		case 1:
			r = kvm->callbacks->writeb(kvm->opaque, addr,
						   *(uint8_t *)data);
			break;
		case 2:
			r = kvm->callbacks->writew(kvm->opaque, addr,
						   *(uint16_t *)data);
			break;
		case 4:
			r = kvm->callbacks->writel(kvm->opaque, addr,
						   *(uint32_t *)data);
			break;
		case 8:
			r = kvm->callbacks->writeq(kvm->opaque, addr,
						   *(uint64_t *)data);
			break;
		}
	} else {
		switch (kvm_run->mmio.len) {
		case 1:
			r = kvm->callbacks->readb(kvm->opaque, addr,
						  (uint8_t *)data);
			break;
		case 2:
			r = kvm->callbacks->readw(kvm->opaque, addr,
						  (uint16_t *)data);
			break;
		case 4:
			r = kvm->callbacks->readl(kvm->opaque, addr,
						  (uint32_t *)data);
			break;
		case 8:
			r = kvm->callbacks->readq(kvm->opaque, addr,
						  (uint64_t *)data);
			break;
		}
		kvm_run->io_completed = 1;
	}
	return r;
}
static int handle_mmio(kvm_context_t kvm, struct kvm_run *kvm_run)
{
	unsigned long addr = kvm_run->mmio.phys_addr;
	void *data = kvm_run->mmio.data;
	int r = -1;

	if (kvm_run->mmio.is_write) {
		switch (kvm_run->mmio.len) {
		case 1:
			r = kvm->callbacks->writeb(kvm->opaque, addr,
						   *(uint8_t *)data);
			break;
		case 2:
			r = kvm->callbacks->writew(kvm->opaque, addr,
						   *(uint16_t *)data);
			break;
		case 4:
			r = kvm->callbacks->writel(kvm->opaque, addr,
						   *(uint32_t *)data);
			break;
		case 8:
			r = kvm->callbacks->writeq(kvm->opaque, addr,
						   *(uint64_t *)data);
			break;
		}
	} else {
		switch (kvm_run->mmio.len) {
		case 1:
			r = kvm->callbacks->readb(kvm->opaque, addr,
						  (uint8_t *)data);
			break;
		case 2:
			r = kvm->callbacks->readw(kvm->opaque, addr,
						  (uint16_t *)data);
			break;
		case 4:
			r = kvm->callbacks->readl(kvm->opaque, addr,
						  (uint32_t *)data);
			break;
		case 8:
			r = kvm->callbacks->readq(kvm->opaque, addr,
						  (uint64_t *)data);
			break;
		}
	}
	return r;
}
static int handle_io_window(kvm_context_t kvm)
{
	return kvm->callbacks->io_window(kvm->opaque);
}

static int handle_halt(kvm_context_t kvm, int vcpu)
{
	return kvm->callbacks->halt(kvm->opaque, vcpu);
}

static int handle_shutdown(kvm_context_t kvm, int vcpu)
{
	return kvm->callbacks->shutdown(kvm->opaque, vcpu);
}

int try_push_interrupts(kvm_context_t kvm)
{
	return kvm->callbacks->try_push_interrupts(kvm->opaque);
}

static void post_kvm_run(kvm_context_t kvm, int vcpu)
{
	kvm->callbacks->post_kvm_run(kvm->opaque, vcpu);
}

static void pre_kvm_run(kvm_context_t kvm, int vcpu)
{
	kvm->callbacks->pre_kvm_run(kvm->opaque, vcpu);
}
int kvm_get_interrupt_flag(kvm_context_t kvm, int vcpu)
{
	struct kvm_run *run = kvm->run[vcpu];

	if (kvm_abi == 10)
		return ((struct kvm_run_abi10 *)run)->if_flag;
	return run->if_flag;
}
uint64_t kvm_get_apic_base(kvm_context_t kvm, int vcpu)
{
	struct kvm_run *run = kvm->run[vcpu];

	if (kvm_abi == 10)
		return ((struct kvm_run_abi10 *)run)->apic_base;
	return run->apic_base;
}
int kvm_is_ready_for_interrupt_injection(kvm_context_t kvm, int vcpu)
{
	struct kvm_run *run = kvm->run[vcpu];

	if (kvm_abi == 10)
		return ((struct kvm_run_abi10 *)run)->ready_for_interrupt_injection;
	return run->ready_for_interrupt_injection;
}
void kvm_set_cr8(kvm_context_t kvm, int vcpu, uint64_t cr8)
{
	struct kvm_run *run = kvm->run[vcpu];

	if (kvm_abi == 10) {
		((struct kvm_run_abi10 *)run)->cr8 = cr8;
		return;
	}
	run->cr8 = cr8;
}
static int kvm_run_abi10(kvm_context_t kvm, int vcpu)
{
	int r;
	int fd = kvm->vcpu_fd[vcpu];
	struct kvm_run_abi10 *run = (struct kvm_run_abi10 *)kvm->run[vcpu];

again:
	run->request_interrupt_window = try_push_interrupts(kvm);
	pre_kvm_run(kvm, vcpu);
	r = ioctl(fd, KVM_RUN, 0);
	post_kvm_run(kvm, vcpu);

	run->io_completed = 0;
	if (r == -1 && errno != EINTR) {
		r = -errno;
		printf("kvm_run: %m\n");
		return r;
	}
	if (r == -1) {
		r = handle_io_window(kvm);
		goto more;
	}
	switch (run->exit_reason) {
	case KVM_EXIT_UNKNOWN:
		fprintf(stderr, "unhandled vm exit: 0x%x\n",
			(unsigned)run->hw.hardware_exit_reason);
		kvm_show_regs(kvm, vcpu);
		abort();
		break;
	case KVM_EXIT_FAIL_ENTRY:
		fprintf(stderr, "kvm_run: failed entry, reason %u\n",
			(unsigned)run->fail_entry.hardware_entry_failure_reason & 0xffff);
		return -ENOEXEC;
	case KVM_EXIT_EXCEPTION:
		fprintf(stderr, "exception %d (%x)\n",
			run->ex.exception, run->ex.error_code);
		kvm_show_regs(kvm, vcpu);
		kvm_show_code(kvm, vcpu);
		abort();
		break;
	case KVM_EXIT_IO:
		r = handle_io_abi10(kvm, run, vcpu);
		break;
	case KVM_EXIT_DEBUG:
		r = handle_debug(kvm, vcpu);
		break;
	case KVM_EXIT_MMIO:
		r = handle_mmio_abi10(kvm, run);
		break;
	case KVM_EXIT_HLT:
		r = handle_halt(kvm, vcpu);
		break;
	case KVM_EXIT_IRQ_WINDOW_OPEN:
		break;
	case KVM_EXIT_SHUTDOWN:
		r = handle_shutdown(kvm, vcpu);
		break;
	default:
		fprintf(stderr, "unhandled vm exit: 0x%x\n", run->exit_reason);
		kvm_show_regs(kvm, vcpu);
		abort();
		break;
	}
more:
	if (!r)
		goto again;
	return r;
}
int kvm_run(kvm_context_t kvm, int vcpu)
{
	int r;
	int fd = kvm->vcpu_fd[vcpu];
	struct kvm_run *run = kvm->run[vcpu];

	if (kvm_abi == 10)
		return kvm_run_abi10(kvm, vcpu);

again:
	run->request_interrupt_window = try_push_interrupts(kvm);
	pre_kvm_run(kvm, vcpu);
	r = ioctl(fd, KVM_RUN, 0);
	post_kvm_run(kvm, vcpu);

	if (r == -1 && errno != EINTR) {
		r = -errno;
		printf("kvm_run: %m\n");
		return r;
	}
	if (r == -1) {
		r = handle_io_window(kvm);
		goto more;
	}
	switch (run->exit_reason) {
	case KVM_EXIT_UNKNOWN:
		fprintf(stderr, "unhandled vm exit: 0x%x\n",
			(unsigned)run->hw.hardware_exit_reason);
		kvm_show_regs(kvm, vcpu);
		abort();
		break;
	case KVM_EXIT_FAIL_ENTRY:
		fprintf(stderr, "kvm_run: failed entry, reason %u\n",
			(unsigned)run->fail_entry.hardware_entry_failure_reason & 0xffff);
		return -ENOEXEC;
	case KVM_EXIT_EXCEPTION:
		fprintf(stderr, "exception %d (%x)\n",
			run->ex.exception, run->ex.error_code);
		kvm_show_regs(kvm, vcpu);
		kvm_show_code(kvm, vcpu);
		abort();
		break;
	case KVM_EXIT_IO:
		r = handle_io(kvm, run, vcpu);
		break;
	case KVM_EXIT_DEBUG:
		r = handle_debug(kvm, vcpu);
		break;
	case KVM_EXIT_MMIO:
		r = handle_mmio(kvm, run);
		break;
	case KVM_EXIT_HLT:
		r = handle_halt(kvm, vcpu);
		break;
	case KVM_EXIT_IRQ_WINDOW_OPEN:
		break;
	case KVM_EXIT_SHUTDOWN:
		r = handle_shutdown(kvm, vcpu);
		break;
	default:
		fprintf(stderr, "unhandled vm exit: 0x%x\n", run->exit_reason);
		kvm_show_regs(kvm, vcpu);
		abort();
		break;
	}
more:
	if (!r)
		goto again;
	return r;
}
int kvm_inject_irq(kvm_context_t kvm, int vcpu, unsigned irq)
{
	struct kvm_interrupt intr;

	intr.irq = irq;
	return ioctl(kvm->vcpu_fd[vcpu], KVM_INTERRUPT, &intr);
}
int kvm_guest_debug(kvm_context_t kvm, int vcpu, struct kvm_debug_guest *dbg)
{
	return ioctl(kvm->vcpu_fd[vcpu], KVM_DEBUG_GUEST, dbg);
}
int kvm_setup_cpuid(kvm_context_t kvm, int vcpu, int nent,
		    struct kvm_cpuid_entry *entries)
{
	struct kvm_cpuid *cpuid;
	int r;

	cpuid = malloc(sizeof(*cpuid) + nent * sizeof(*entries));
	if (!cpuid)
		return -ENOMEM;

	cpuid->nent = nent;
	memcpy(cpuid->entries, entries, nent * sizeof(*entries));
	r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_CPUID, cpuid);

	free(cpuid);
	return r;
}
int kvm_set_signal_mask(kvm_context_t kvm, int vcpu, const sigset_t *sigset)
{
	struct kvm_signal_mask *sigmask;
	int r;

	if (!sigset) {
		r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_SIGNAL_MASK, NULL);
		if (r == -1)
			r = -errno;
		return r;
	}
	sigmask = malloc(sizeof(*sigmask) + sizeof(*sigset));
	if (!sigmask)
		return -ENOMEM;

	sigmask->len = 8;
	memcpy(sigmask->sigset, sigset, sizeof(*sigset));
	r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_SIGNAL_MASK, sigmask);
	if (r == -1)
		r = -errno;
	free(sigmask);
	return r;
}