kvm: testsuite: add protected mode smp tests
[qemu-kvm/fedora.git] / kvm / user / kvmctl.c

/*
 * Kernel-based Virtual Machine control library
 *
 * This library provides an API to control the kvm hardware virtualization
 * module.
 *
 * Copyright (C) 2006 Qumranet
 *
 * Authors:
 *
 *      Avi Kivity   <avi@qumranet.com>
 *      Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the GNU LGPL license, version 2.
 */

#include <unistd.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include "kvmctl.h"
#include "kvm-abi-10.h"

#define EXPECTED_KVM_API_VERSION 12

#if EXPECTED_KVM_API_VERSION != KVM_API_VERSION
#error libkvm: userspace and kernel version mismatch
#endif

static int kvm_abi = EXPECTED_KVM_API_VERSION;

#define PAGE_SIZE 4096ul

/* FIXME: share this number with kvm */
/* FIXME: or dynamically alloc/realloc regions */
#define KVM_MAX_NUM_MEM_REGIONS 4u
#define MAX_VCPUS 4

/**
 * \brief The KVM context
 *
 * The verbose KVM context
 */
struct kvm_context {
        /// File descriptor to /dev/kvm
        int fd;
        int vm_fd;
        int vcpu_fd[MAX_VCPUS];
        struct kvm_run *run[MAX_VCPUS];
        /// Callbacks that KVM uses to emulate various unvirtualizable functionality
        struct kvm_callbacks *callbacks;
        void *opaque;
        /// A pointer to the memory used as the physical memory for the guest
        void *physical_memory;
        /// is dirty pages logging enabled for all regions or not
        int dirty_pages_log_all;
        /// memory regions parameters
        struct kvm_memory_region mem_regions[KVM_MAX_NUM_MEM_REGIONS];
};

/*
 * memory regions parameters
 */
static void kvm_memory_region_save_params(kvm_context_t kvm,
                                          struct kvm_memory_region *mem)
{
        if (!mem || (mem->slot >= KVM_MAX_NUM_MEM_REGIONS)) {
                fprintf(stderr, "BUG: %s: invalid parameters\n", __FUNCTION__);
                return;
        }
        kvm->mem_regions[mem->slot] = *mem;
}

static void kvm_memory_region_clear_params(kvm_context_t kvm, int regnum)
{
        if (regnum >= KVM_MAX_NUM_MEM_REGIONS) {
                fprintf(stderr, "BUG: %s: invalid parameters\n", __FUNCTION__);
                return;
        }
        kvm->mem_regions[regnum].memory_size = 0;
}

/*
 * dirty pages logging control
 */
static int kvm_dirty_pages_log_change(kvm_context_t kvm, int regnum, __u32 flag)
{
        int r;
        struct kvm_memory_region *mem;

        if (regnum >= KVM_MAX_NUM_MEM_REGIONS) {
                fprintf(stderr, "BUG: %s: invalid parameters\n", __FUNCTION__);
                return 1;
        }
        mem = &kvm->mem_regions[regnum];
        if (mem->memory_size == 0) /* not used */
                return 0;
        if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) /* log already enabled */
                return 0;
        mem->flags |= flag;  /* temporarily set the flag */
        r = ioctl(kvm->vm_fd, KVM_SET_MEMORY_REGION, mem);
        mem->flags &= ~flag; /* back to the previous value */
        if (r == -1)
                fprintf(stderr, "%s: %m\n", __FUNCTION__);
        return r;
}

static int kvm_dirty_pages_log_change_all(kvm_context_t kvm, __u32 flag)
{
        int i, r;

        for (i = r = 0; i < KVM_MAX_NUM_MEM_REGIONS && r == 0; i++) {
                r = kvm_dirty_pages_log_change(kvm, i, flag);
        }
        return r;
}

/**
 * Enable dirty page logging for all memory regions
 */
int kvm_dirty_pages_log_enable_all(kvm_context_t kvm)
{
        if (kvm->dirty_pages_log_all)
                return 0;
        kvm->dirty_pages_log_all = 1;
        return kvm_dirty_pages_log_change_all(kvm, KVM_MEM_LOG_DIRTY_PAGES);
}

/**
 * Enable dirty page logging only for memory regions that were created with
 * dirty logging enabled (disable for all other memory regions).
 */
int kvm_dirty_pages_log_reset(kvm_context_t kvm)
{
        if (!kvm->dirty_pages_log_all)
                return 0;
        kvm->dirty_pages_log_all = 0;
        return kvm_dirty_pages_log_change_all(kvm, 0);
}

kvm_context_t kvm_init(struct kvm_callbacks *callbacks,
                       void *opaque)
{
        int fd;
        kvm_context_t kvm;
        int r, i;

        fd = open("/dev/kvm", O_RDWR);
        if (fd == -1) {
                perror("open /dev/kvm");
                return NULL;
        }
        r = ioctl(fd, KVM_GET_API_VERSION, 0);
        if (r == -1) {
                fprintf(stderr, "kvm kernel version too old: "
                        "KVM_GET_API_VERSION ioctl not supported\n");
                goto out_close;
        }
        if (r < EXPECTED_KVM_API_VERSION && r != 10) {
                fprintf(stderr, "kvm kernel version too old: "
                        "We expect API version %d or newer, but got "
                        "version %d\n",
                        EXPECTED_KVM_API_VERSION, r);
                goto out_close;
        }
        if (r > EXPECTED_KVM_API_VERSION) {
                fprintf(stderr, "kvm userspace version too old\n");
                goto out_close;
        }
        kvm_abi = r;
        kvm = malloc(sizeof(*kvm));
        if (!kvm)
                goto out_close;
        kvm->fd = fd;
        kvm->vm_fd = -1;
        for (i = 0; i < MAX_VCPUS; ++i)
                kvm->vcpu_fd[i] = -1;   /* mark all vcpu slots unused */
        kvm->callbacks = callbacks;
        kvm->opaque = opaque;
        kvm->dirty_pages_log_all = 0;
        memset(&kvm->mem_regions, 0, sizeof(kvm->mem_regions));

        return kvm;
out_close:
        close(fd);
        return NULL;
}

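/*
 * Illustrative usage sketch (not part of the library): a minimal caller
 * wires up a kvm_callbacks table, creates a VM with some guest memory,
 * and runs vcpu 0 until a callback returns nonzero.  The callback table
 * "my_callbacks" and the 16MB size are assumptions for the example.
 *
 *      void *guest_mem;
 *      kvm_context_t kvm = kvm_init(&my_callbacks, NULL);
 *      if (!kvm)
 *              exit(1);
 *      if (kvm_create(kvm, 16 * 1024 * 1024, &guest_mem) < 0)
 *              exit(1);
 *      // ... load guest code into guest_mem, set up registers ...
 *      kvm_run(kvm, 0);        // loops internally while callbacks return 0
 *      kvm_finalize(kvm);
 */
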
void kvm_finalize(kvm_context_t kvm)
{
        int i;

        for (i = 0; i < MAX_VCPUS; ++i)
                if (kvm->vcpu_fd[i] != -1)
                        close(kvm->vcpu_fd[i]);
        if (kvm->vm_fd != -1)
                close(kvm->vm_fd);
        close(kvm->fd);
        free(kvm);
}

int kvm_create_vcpu(kvm_context_t kvm, int slot)
{
        long mmap_size;
        int r;

        r = ioctl(kvm->vm_fd, KVM_CREATE_VCPU, slot);
        if (r == -1) {
                r = -errno;
                fprintf(stderr, "kvm_create_vcpu: %m\n");
                return r;
        }
        kvm->vcpu_fd[slot] = r;
        mmap_size = ioctl(kvm->fd, KVM_GET_VCPU_MMAP_SIZE, 0);
        if (mmap_size == -1) {
                r = -errno;
                fprintf(stderr, "get vcpu mmap size: %m\n");
                return r;
        }
        kvm->run[slot] = mmap(0, mmap_size, PROT_READ|PROT_WRITE, MAP_SHARED,
                              kvm->vcpu_fd[slot], 0);
        if (kvm->run[slot] == MAP_FAILED) {
                r = -errno;
                fprintf(stderr, "mmap vcpu area: %m\n");
                return r;
        }
        return 0;
}

int kvm_create(kvm_context_t kvm, unsigned long memory, void **vm_mem)
{
        unsigned long dosmem = 0xa0000;
        unsigned long exmem = 0xc0000;
        int fd = kvm->fd;
        int zfd;
        int r;
        struct kvm_memory_region low_memory = {
                .slot = 3,
                .memory_size = memory < dosmem ? memory : dosmem,
                .guest_phys_addr = 0,
        };
        struct kvm_memory_region extended_memory = {
                .slot = 0,
                .memory_size = memory < exmem ? 0 : memory - exmem,
                .guest_phys_addr = exmem,
        };

        kvm->vcpu_fd[0] = -1;

        fd = ioctl(fd, KVM_CREATE_VM, 0);
        if (fd == -1) {
                fprintf(stderr, "kvm_create_vm: %m\n");
                return -1;
        }
        kvm->vm_fd = fd;

        /* 640K should be enough. */
        r = ioctl(fd, KVM_SET_MEMORY_REGION, &low_memory);
        if (r == -1) {
                fprintf(stderr, "kvm_create_memory_region: %m\n");
                return -1;
        }
        if (extended_memory.memory_size) {
                r = ioctl(fd, KVM_SET_MEMORY_REGION, &extended_memory);
                if (r == -1) {
                        fprintf(stderr, "kvm_create_memory_region: %m\n");
                        return -1;
                }
        }

        kvm_memory_region_save_params(kvm, &low_memory);
        kvm_memory_region_save_params(kvm, &extended_memory);

        *vm_mem = mmap(0, memory, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
        if (*vm_mem == MAP_FAILED) {
                fprintf(stderr, "mmap: %m\n");
                return -1;
        }
        kvm->physical_memory = *vm_mem;

        zfd = open("/dev/zero", O_RDONLY);
        if (zfd == -1) {
                fprintf(stderr, "open /dev/zero: %m\n");
                return -1;
        }
        mmap(*vm_mem + 0xa8000, 0x8000, PROT_READ|PROT_WRITE,
             MAP_PRIVATE|MAP_FIXED, zfd, 0);
        close(zfd);

        r = kvm_create_vcpu(kvm, 0);
        if (r < 0)
                return r;

        return 0;
}

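/*
 * For reference, the guest physical layout that kvm_create() sets up,
 * as read from the code above:
 *
 *      0x00000 - 0x9ffff   "low" memory, slot 3 (the classic 640K)
 *      0xa0000 - 0xbffff   hole, no slot (VGA / adapter ROM window)
 *      0xc0000 and up      "extended" memory, slot 0
 *
 * The MAP_PRIVATE /dev/zero mapping at 0xa8000 only papers over part of
 * that hole in this process's view of guest memory; it does not create
 * a memory slot visible to the guest.
 */
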
void *kvm_create_phys_mem(kvm_context_t kvm, unsigned long phys_start,
                          unsigned long len, int slot, int log, int writable)
{
        void *ptr;
        int r;
        int fd = kvm->vm_fd;
        int prot = PROT_READ;
        struct kvm_memory_region memory = {
                .slot = slot,
                .memory_size = len,
                .guest_phys_addr = phys_start,
                .flags = log ? KVM_MEM_LOG_DIRTY_PAGES : 0,
        };

        r = ioctl(fd, KVM_SET_MEMORY_REGION, &memory);
        if (r == -1)
                return NULL;

        kvm_memory_region_save_params(kvm, &memory);

        if (writable)
                prot |= PROT_WRITE;

        ptr = mmap(0, len, prot, MAP_SHARED, fd, phys_start);
        if (ptr == MAP_FAILED)
                return NULL;
        return ptr;
}

void kvm_destroy_phys_mem(kvm_context_t kvm, unsigned long phys_start,
                          unsigned long len)
{
        /* for each memory region in (phys_start, phys_start + len) do
         *      kvm_memory_region_clear_params(kvm, region); */
        kvm_memory_region_clear_params(kvm, 0); /* avoid compiler warning */
        fprintf(stderr, "kvm_destroy_phys_mem: implement me\n");
        exit(1);
}

int kvm_create_memory_alias(kvm_context_t kvm,
                            int slot,
                            uint64_t phys_start,
                            uint64_t len,
                            uint64_t target_phys)
{
        struct kvm_memory_alias alias = {
                .slot = slot,
                .flags = 0,
                .guest_phys_addr = phys_start,
                .memory_size = len,
                .target_phys_addr = target_phys,
        };
        int fd = kvm->vm_fd;
        int r;

        r = ioctl(fd, KVM_SET_MEMORY_ALIAS, &alias);
        if (r == -1)
                return -errno;

        return 0;
}

int kvm_destroy_memory_alias(kvm_context_t kvm, int slot)
{
        /* an alias with zero size tears the slot down */
        return kvm_create_memory_alias(kvm, slot, 0, 0, 0);
}

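/*
 * Illustrative sketch (the addresses are assumptions, not from this
 * file): an alias makes a range of guest physical addresses show memory
 * that really lives at another guest physical address, e.g. pointing the
 * VGA window at pages allocated elsewhere:
 *
 *      // alias slot 1: guest 0xa0000..0xa7fff -> guest phys 0x100000
 *      kvm_create_memory_alias(kvm, 1, 0xa0000, 0x8000, 0x100000);
 *      // ...
 *      kvm_destroy_memory_alias(kvm, 1);
 */
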
static int kvm_get_map(kvm_context_t kvm, int ioctl_num, int slot, void *buf)
{
        int r;
        struct kvm_dirty_log log = {
                .slot = slot,
        };

        log.dirty_bitmap = buf;

        r = ioctl(kvm->vm_fd, ioctl_num, &log);
        if (r == -1)
                return -errno;
        return 0;
}

int kvm_get_dirty_pages(kvm_context_t kvm, int slot, void *buf)
{
        return kvm_get_map(kvm, KVM_GET_DIRTY_LOG, slot, buf);
}

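/*
 * Illustrative sketch (not from this file): the caller owns the bitmap
 * buffer, one bit per guest page, so it must hold memory_size / PAGE_SIZE
 * bits rounded up to whole bytes:
 *
 *      unsigned long npages = region_size / PAGE_SIZE;
 *      unsigned char *bitmap = calloc((npages + 7) / 8, 1);
 *      if (bitmap && kvm_get_dirty_pages(kvm, slot, bitmap) == 0) {
 *              // bit i set => guest page i of this slot was written
 *      }
 *      free(bitmap);
 */
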
int kvm_get_mem_map(kvm_context_t kvm, int slot, void *buf)
{
#ifdef KVM_GET_MEM_MAP
        return kvm_get_map(kvm, KVM_GET_MEM_MAP, slot, buf);
#else /* not KVM_GET_MEM_MAP ==> fake it: all pages exist */
        unsigned long i, n, m, npages;
        unsigned char v;

        if (slot >= KVM_MAX_NUM_MEM_REGIONS) {
                errno = EINVAL;
                return -1;
        }
        npages = kvm->mem_regions[slot].memory_size / PAGE_SIZE;
        n = npages / 8;
        m = npages % 8;
        memset(buf, 0xff, n); /* all pages exist */
        v = 0;
        for (i = 0; i < m; i++) /* last byte may only be partially used */
                v |= 1 << (7 - i);
        if (v)
                ((unsigned char *)buf)[n] = v;
        return 0;
#endif /* KVM_GET_MEM_MAP */
}

static int handle_io_abi10(kvm_context_t kvm, struct kvm_run_abi10 *run,
                           int vcpu)
{
        uint16_t addr = run->io.port;
        int r;
        int i;
        void *p = (void *)run + run->io.data_offset;

        for (i = 0; i < run->io.count; ++i) {
                switch (run->io.direction) {
                case KVM_EXIT_IO_IN:
                        switch (run->io.size) {
                        case 1:
                                r = kvm->callbacks->inb(kvm->opaque, addr, p);
                                break;
                        case 2:
                                r = kvm->callbacks->inw(kvm->opaque, addr, p);
                                break;
                        case 4:
                                r = kvm->callbacks->inl(kvm->opaque, addr, p);
                                break;
                        default:
                                fprintf(stderr, "bad I/O size %d\n", run->io.size);
                                return -EMSGSIZE;
                        }
                        break;
                case KVM_EXIT_IO_OUT:
                        switch (run->io.size) {
                        case 1:
                                r = kvm->callbacks->outb(kvm->opaque, addr,
                                                         *(uint8_t *)p);
                                break;
                        case 2:
                                r = kvm->callbacks->outw(kvm->opaque, addr,
                                                         *(uint16_t *)p);
                                break;
                        case 4:
                                r = kvm->callbacks->outl(kvm->opaque, addr,
                                                         *(uint32_t *)p);
                                break;
                        default:
                                fprintf(stderr, "bad I/O size %d\n", run->io.size);
                                return -EMSGSIZE;
                        }
                        break;
                default:
                        fprintf(stderr, "bad I/O direction %d\n", run->io.direction);
                        return -EPROTO;
                }

                p += run->io.size;
        }
        run->io_completed = 1;

        return 0;
}

static int handle_io(kvm_context_t kvm, struct kvm_run *run, int vcpu)
{
        uint16_t addr = run->io.port;
        int r;
        int i;
        void *p = (void *)run + run->io.data_offset;

        for (i = 0; i < run->io.count; ++i) {
                switch (run->io.direction) {
                case KVM_EXIT_IO_IN:
                        switch (run->io.size) {
                        case 1:
                                r = kvm->callbacks->inb(kvm->opaque, addr, p);
                                break;
                        case 2:
                                r = kvm->callbacks->inw(kvm->opaque, addr, p);
                                break;
                        case 4:
                                r = kvm->callbacks->inl(kvm->opaque, addr, p);
                                break;
                        default:
                                fprintf(stderr, "bad I/O size %d\n", run->io.size);
                                return -EMSGSIZE;
                        }
                        break;
                case KVM_EXIT_IO_OUT:
                        switch (run->io.size) {
                        case 1:
                                r = kvm->callbacks->outb(kvm->opaque, addr,
                                                         *(uint8_t *)p);
                                break;
                        case 2:
                                r = kvm->callbacks->outw(kvm->opaque, addr,
                                                         *(uint16_t *)p);
                                break;
                        case 4:
                                r = kvm->callbacks->outl(kvm->opaque, addr,
                                                         *(uint32_t *)p);
                                break;
                        default:
                                fprintf(stderr, "bad I/O size %d\n", run->io.size);
                                return -EMSGSIZE;
                        }
                        break;
                default:
                        fprintf(stderr, "bad I/O direction %d\n", run->io.direction);
                        return -EPROTO;
                }

                p += run->io.size;
        }

        return 0;
}

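/*
 * Note on the layout both I/O handlers rely on: for a KVM_EXIT_IO exit
 * the kernel places the transferred bytes inside the kvm_run mapping
 * itself, at run->io.data_offset, as run->io.count back-to-back items of
 * run->io.size bytes each (a "rep ins/outs" style burst).  That is why
 * the loops above simply advance p by run->io.size per iteration.
 */
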
static int handle_debug(kvm_context_t kvm, int vcpu)
{
        return kvm->callbacks->debug(kvm->opaque, vcpu);
}

int kvm_get_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs)
{
        return ioctl(kvm->vcpu_fd[vcpu], KVM_GET_REGS, regs);
}

int kvm_set_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs)
{
        return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_REGS, regs);
}

int kvm_get_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu)
{
        return ioctl(kvm->vcpu_fd[vcpu], KVM_GET_FPU, fpu);
}

int kvm_set_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu)
{
        return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_FPU, fpu);
}

int kvm_get_sregs(kvm_context_t kvm, int vcpu, struct kvm_sregs *sregs)
{
        return ioctl(kvm->vcpu_fd[vcpu], KVM_GET_SREGS, sregs);
}

int kvm_set_sregs(kvm_context_t kvm, int vcpu, struct kvm_sregs *sregs)
{
        return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_SREGS, sregs);
}

/*
 * Returns the available msr list.  The caller must free it.
 */
struct kvm_msr_list *kvm_get_msr_list(kvm_context_t kvm)
{
        struct kvm_msr_list sizer, *msrs;
        int r, e;

        sizer.nmsrs = 0;
        r = ioctl(kvm->fd, KVM_GET_MSR_INDEX_LIST, &sizer);
        if (r == -1 && errno != E2BIG)
                return NULL;
        msrs = malloc(sizeof *msrs + sizer.nmsrs * sizeof *msrs->indices);
        if (!msrs) {
                errno = ENOMEM;
                return NULL;
        }
        msrs->nmsrs = sizer.nmsrs;
        r = ioctl(kvm->fd, KVM_GET_MSR_INDEX_LIST, msrs);
        if (r == -1) {
                e = errno;
                free(msrs);
                errno = e;
                return NULL;
        }
        return msrs;
}

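/*
 * Illustrative sketch (not from this file): the function above uses the
 * usual two-call pattern -- probe with nmsrs = 0 (the kernel fails with
 * E2BIG and fills in the count), then call again with a buffer of that
 * size.  A caller just consumes the result:
 *
 *      struct kvm_msr_list *list = kvm_get_msr_list(kvm);
 *      if (list) {
 *              unsigned i;
 *              for (i = 0; i < list->nmsrs; ++i)
 *                      printf("msr %#x\n", list->indices[i]);
 *              free(list);
 *      }
 */
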
int kvm_get_msrs(kvm_context_t kvm, int vcpu, struct kvm_msr_entry *msrs,
                 int n)
{
        struct kvm_msrs *kmsrs = malloc(sizeof *kmsrs + n * sizeof *msrs);
        int r, e;

        if (!kmsrs) {
                errno = ENOMEM;
                return -1;
        }
        kmsrs->nmsrs = n;
        memcpy(kmsrs->entries, msrs, n * sizeof *msrs);
        r = ioctl(kvm->vcpu_fd[vcpu], KVM_GET_MSRS, kmsrs);
        e = errno;
        memcpy(msrs, kmsrs->entries, n * sizeof *msrs);
        free(kmsrs);
        errno = e;
        return r;
}

int kvm_set_msrs(kvm_context_t kvm, int vcpu, struct kvm_msr_entry *msrs,
                 int n)
{
        struct kvm_msrs *kmsrs = malloc(sizeof *kmsrs + n * sizeof *msrs);
        int r, e;

        if (!kmsrs) {
                errno = ENOMEM;
                return -1;
        }
        kmsrs->nmsrs = n;
        memcpy(kmsrs->entries, msrs, n * sizeof *msrs);
        r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_MSRS, kmsrs);
        e = errno;
        free(kmsrs);
        errno = e;
        return r;
}

static void print_seg(FILE *file, const char *name, struct kvm_segment *seg)
{
        fprintf(file,
                "%s %04x (%08llx/%08x p %d dpl %d db %d s %d type %x l %d"
                " g %d avl %d)\n",
                name, seg->selector, seg->base, seg->limit, seg->present,
                seg->dpl, seg->db, seg->s, seg->type, seg->l, seg->g,
                seg->avl);
}

static void print_dt(FILE *file, const char *name, struct kvm_dtable *dt)
{
        fprintf(file, "%s %llx/%x\n", name, dt->base, dt->limit);
}

void kvm_show_regs(kvm_context_t kvm, int vcpu)
{
        int fd = kvm->vcpu_fd[vcpu];
        struct kvm_regs regs;
        struct kvm_sregs sregs;
        int r;

        r = ioctl(fd, KVM_GET_REGS, &regs);
        if (r == -1) {
                perror("KVM_GET_REGS");
                return;
        }
        fprintf(stderr,
                "rax %016llx rbx %016llx rcx %016llx rdx %016llx\n"
                "rsi %016llx rdi %016llx rsp %016llx rbp %016llx\n"
                "r8  %016llx r9  %016llx r10 %016llx r11 %016llx\n"
                "r12 %016llx r13 %016llx r14 %016llx r15 %016llx\n"
                "rip %016llx rflags %08llx\n",
                regs.rax, regs.rbx, regs.rcx, regs.rdx,
                regs.rsi, regs.rdi, regs.rsp, regs.rbp,
                regs.r8,  regs.r9,  regs.r10, regs.r11,
                regs.r12, regs.r13, regs.r14, regs.r15,
                regs.rip, regs.rflags);
        r = ioctl(fd, KVM_GET_SREGS, &sregs);
        if (r == -1) {
                perror("KVM_GET_SREGS");
                return;
        }
        print_seg(stderr, "cs", &sregs.cs);
        print_seg(stderr, "ds", &sregs.ds);
        print_seg(stderr, "es", &sregs.es);
        print_seg(stderr, "ss", &sregs.ss);
        print_seg(stderr, "fs", &sregs.fs);
        print_seg(stderr, "gs", &sregs.gs);
        print_seg(stderr, "tr", &sregs.tr);
        print_seg(stderr, "ldt", &sregs.ldt);
        print_dt(stderr, "gdt", &sregs.gdt);
        print_dt(stderr, "idt", &sregs.idt);
        fprintf(stderr, "cr0 %llx cr2 %llx cr3 %llx cr4 %llx cr8 %llx"
                " efer %llx\n",
                sregs.cr0, sregs.cr2, sregs.cr3, sregs.cr4, sregs.cr8,
                sregs.efer);
}

static void kvm_show_code(kvm_context_t kvm, int vcpu)
{
#define CR0_PE_MASK (1ULL<<0)
        int fd = kvm->vcpu_fd[vcpu];
        struct kvm_regs regs;
        struct kvm_sregs sregs;
        int r;
        unsigned char code[30];
        char code_str[sizeof(code) * 3 + 1];
        unsigned long rip;

        r = ioctl(fd, KVM_GET_SREGS, &sregs);
        if (r == -1) {
                perror("KVM_GET_SREGS");
                return;
        }
        if (sregs.cr0 & CR0_PE_MASK)
                return; /* this address computation only works in real mode */

        r = ioctl(fd, KVM_GET_REGS, &regs);
        if (r == -1) {
                perror("KVM_GET_REGS");
                return;
        }
        rip = sregs.cs.base * 16 + regs.rip;
        memcpy(code, kvm->physical_memory + rip, sizeof code);
        *code_str = 0;
        for (r = 0; r < sizeof code; ++r)
                sprintf(code_str + strlen(code_str), " %02x", code[r]);
        fprintf(stderr, "code:%s\n", code_str);
}

static int handle_mmio_abi10(kvm_context_t kvm, struct kvm_run_abi10 *kvm_run)
{
        unsigned long addr = kvm_run->mmio.phys_addr;
        void *data = kvm_run->mmio.data;
        int r = -1;

        if (kvm_run->mmio.is_write) {
                switch (kvm_run->mmio.len) {
                case 1:
                        r = kvm->callbacks->writeb(kvm->opaque, addr,
                                                   *(uint8_t *)data);
                        break;
                case 2:
                        r = kvm->callbacks->writew(kvm->opaque, addr,
                                                   *(uint16_t *)data);
                        break;
                case 4:
                        r = kvm->callbacks->writel(kvm->opaque, addr,
                                                   *(uint32_t *)data);
                        break;
                case 8:
                        r = kvm->callbacks->writeq(kvm->opaque, addr,
                                                   *(uint64_t *)data);
                        break;
                }
        } else {
                switch (kvm_run->mmio.len) {
                case 1:
                        r = kvm->callbacks->readb(kvm->opaque, addr,
                                                  (uint8_t *)data);
                        break;
                case 2:
                        r = kvm->callbacks->readw(kvm->opaque, addr,
                                                  (uint16_t *)data);
                        break;
                case 4:
                        r = kvm->callbacks->readl(kvm->opaque, addr,
                                                  (uint32_t *)data);
                        break;
                case 8:
                        r = kvm->callbacks->readq(kvm->opaque, addr,
                                                  (uint64_t *)data);
                        break;
                }
                kvm_run->io_completed = 1;
        }
        return r;
}

static int handle_mmio(kvm_context_t kvm, struct kvm_run *kvm_run)
{
        unsigned long addr = kvm_run->mmio.phys_addr;
        void *data = kvm_run->mmio.data;
        int r = -1;

        if (kvm_run->mmio.is_write) {
                switch (kvm_run->mmio.len) {
                case 1:
                        r = kvm->callbacks->writeb(kvm->opaque, addr,
                                                   *(uint8_t *)data);
                        break;
                case 2:
                        r = kvm->callbacks->writew(kvm->opaque, addr,
                                                   *(uint16_t *)data);
                        break;
                case 4:
                        r = kvm->callbacks->writel(kvm->opaque, addr,
                                                   *(uint32_t *)data);
                        break;
                case 8:
                        r = kvm->callbacks->writeq(kvm->opaque, addr,
                                                   *(uint64_t *)data);
                        break;
                }
        } else {
                switch (kvm_run->mmio.len) {
                case 1:
                        r = kvm->callbacks->readb(kvm->opaque, addr,
                                                  (uint8_t *)data);
                        break;
                case 2:
                        r = kvm->callbacks->readw(kvm->opaque, addr,
                                                  (uint16_t *)data);
                        break;
                case 4:
                        r = kvm->callbacks->readl(kvm->opaque, addr,
                                                  (uint32_t *)data);
                        break;
                case 8:
                        r = kvm->callbacks->readq(kvm->opaque, addr,
                                                  (uint64_t *)data);
                        break;
                }
        }
        return r;
}

static int handle_io_window(kvm_context_t kvm)
{
        return kvm->callbacks->io_window(kvm->opaque);
}

static int handle_halt(kvm_context_t kvm, int vcpu)
{
        return kvm->callbacks->halt(kvm->opaque, vcpu);
}

static int handle_shutdown(kvm_context_t kvm, int vcpu)
{
        return kvm->callbacks->shutdown(kvm->opaque, vcpu);
}

int try_push_interrupts(kvm_context_t kvm)
{
        return kvm->callbacks->try_push_interrupts(kvm->opaque);
}

static void post_kvm_run(kvm_context_t kvm, int vcpu)
{
        kvm->callbacks->post_kvm_run(kvm->opaque, vcpu);
}

static void pre_kvm_run(kvm_context_t kvm, int vcpu)
{
        kvm->callbacks->pre_kvm_run(kvm->opaque, vcpu);
}

int kvm_get_interrupt_flag(kvm_context_t kvm, int vcpu)
{
        struct kvm_run *run = kvm->run[vcpu];

        if (kvm_abi == 10)
                return ((struct kvm_run_abi10 *)run)->if_flag;
        return run->if_flag;
}

uint64_t kvm_get_apic_base(kvm_context_t kvm, int vcpu)
{
        struct kvm_run *run = kvm->run[vcpu];

        if (kvm_abi == 10)
                return ((struct kvm_run_abi10 *)run)->apic_base;
        return run->apic_base;
}

int kvm_is_ready_for_interrupt_injection(kvm_context_t kvm, int vcpu)
{
        struct kvm_run *run = kvm->run[vcpu];

        if (kvm_abi == 10)
                return ((struct kvm_run_abi10 *)run)->ready_for_interrupt_injection;
        return run->ready_for_interrupt_injection;
}

void kvm_set_cr8(kvm_context_t kvm, int vcpu, uint64_t cr8)
{
        struct kvm_run *run = kvm->run[vcpu];

        if (kvm_abi == 10) {
                ((struct kvm_run_abi10 *)run)->cr8 = cr8;
                return;
        }
        run->cr8 = cr8;
}

static int kvm_run_abi10(kvm_context_t kvm, int vcpu)
{
        int r;
        int fd = kvm->vcpu_fd[vcpu];
        struct kvm_run_abi10 *run = (struct kvm_run_abi10 *)kvm->run[vcpu];

again:
        run->request_interrupt_window = try_push_interrupts(kvm);
        pre_kvm_run(kvm, vcpu);
        r = ioctl(fd, KVM_RUN, 0);
        post_kvm_run(kvm, vcpu);

        run->io_completed = 0;
        if (r == -1 && errno != EINTR) {
                r = -errno;
                fprintf(stderr, "kvm_run: %m\n");
                return r;
        }
        if (r == -1) {
                r = handle_io_window(kvm);
                goto more;
        }
        switch (run->exit_reason) {
        case KVM_EXIT_UNKNOWN:
                fprintf(stderr, "unhandled vm exit: 0x%x\n",
                        (unsigned)run->hw.hardware_exit_reason);
                kvm_show_regs(kvm, vcpu);
                abort();
                break;
        case KVM_EXIT_FAIL_ENTRY:
                fprintf(stderr, "kvm_run: failed entry, reason %u\n",
                        (unsigned)run->fail_entry.hardware_entry_failure_reason & 0xffff);
                return -ENOEXEC;
        case KVM_EXIT_EXCEPTION:
                fprintf(stderr, "exception %d (%x)\n",
                        run->ex.exception,
                        run->ex.error_code);
                kvm_show_regs(kvm, vcpu);
                kvm_show_code(kvm, vcpu);
                abort();
                break;
        case KVM_EXIT_IO:
                r = handle_io_abi10(kvm, run, vcpu);
                break;
        case KVM_EXIT_DEBUG:
                r = handle_debug(kvm, vcpu);
                break;
        case KVM_EXIT_MMIO:
                r = handle_mmio_abi10(kvm, run);
                break;
        case KVM_EXIT_HLT:
                r = handle_halt(kvm, vcpu);
                break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
                break;
        case KVM_EXIT_SHUTDOWN:
                r = handle_shutdown(kvm, vcpu);
                break;
        default:
                fprintf(stderr, "unhandled vm exit: 0x%x\n", run->exit_reason);
                kvm_show_regs(kvm, vcpu);
                abort();
                break;
        }
more:
        if (!r)
                goto again;
        return r;
}

int kvm_run(kvm_context_t kvm, int vcpu)
{
        int r;
        int fd = kvm->vcpu_fd[vcpu];
        struct kvm_run *run = kvm->run[vcpu];

        if (kvm_abi == 10)
                return kvm_run_abi10(kvm, vcpu);

again:
        run->request_interrupt_window = try_push_interrupts(kvm);
        pre_kvm_run(kvm, vcpu);
        r = ioctl(fd, KVM_RUN, 0);
        post_kvm_run(kvm, vcpu);

        if (r == -1 && errno != EINTR) {
                r = -errno;
                fprintf(stderr, "kvm_run: %m\n");
                return r;
        }
        if (r == -1) {
                r = handle_io_window(kvm);
                goto more;
        }
        switch (run->exit_reason) {
        case KVM_EXIT_UNKNOWN:
                fprintf(stderr, "unhandled vm exit: 0x%x\n",
                        (unsigned)run->hw.hardware_exit_reason);
                kvm_show_regs(kvm, vcpu);
                abort();
                break;
        case KVM_EXIT_FAIL_ENTRY:
                fprintf(stderr, "kvm_run: failed entry, reason %u\n",
                        (unsigned)run->fail_entry.hardware_entry_failure_reason & 0xffff);
                return -ENOEXEC;
        case KVM_EXIT_EXCEPTION:
                fprintf(stderr, "exception %d (%x)\n",
                        run->ex.exception,
                        run->ex.error_code);
                kvm_show_regs(kvm, vcpu);
                kvm_show_code(kvm, vcpu);
                abort();
                break;
        case KVM_EXIT_IO:
                r = handle_io(kvm, run, vcpu);
                break;
        case KVM_EXIT_DEBUG:
                r = handle_debug(kvm, vcpu);
                break;
        case KVM_EXIT_MMIO:
                r = handle_mmio(kvm, run);
                break;
        case KVM_EXIT_HLT:
                r = handle_halt(kvm, vcpu);
                break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
                break;
        case KVM_EXIT_SHUTDOWN:
                r = handle_shutdown(kvm, vcpu);
                break;
        default:
                fprintf(stderr, "unhandled vm exit: 0x%x\n", run->exit_reason);
                kvm_show_regs(kvm, vcpu);
                abort();
                break;
        }
more:
        if (!r)
                goto again;
        return r;
}

int kvm_inject_irq(kvm_context_t kvm, int vcpu, unsigned irq)
{
        struct kvm_interrupt intr;

        intr.irq = irq;
        return ioctl(kvm->vcpu_fd[vcpu], KVM_INTERRUPT, &intr);
}

int kvm_guest_debug(kvm_context_t kvm, int vcpu, struct kvm_debug_guest *dbg)
{
        return ioctl(kvm->vcpu_fd[vcpu], KVM_DEBUG_GUEST, dbg);
}

int kvm_setup_cpuid(kvm_context_t kvm, int vcpu, int nent,
                    struct kvm_cpuid_entry *entries)
{
        struct kvm_cpuid *cpuid;
        int r;

        cpuid = malloc(sizeof(*cpuid) + nent * sizeof(*entries));
        if (!cpuid)
                return -ENOMEM;

        cpuid->nent = nent;
        memcpy(cpuid->entries, entries, nent * sizeof(*entries));
        r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_CPUID, cpuid);

        free(cpuid);
        return r;
}

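/*
 * Illustrative sketch (the values are assumptions, not from this file):
 * a caller typically builds a small table and hands it to each vcpu,
 * e.g. reporting a single standard leaf:
 *
 *      struct kvm_cpuid_entry ent = {
 *              .function = 0,
 *              .eax = 1,               // highest supported standard leaf
 *              .ebx = 0, .ecx = 0, .edx = 0,
 *      };
 *      kvm_setup_cpuid(kvm, 0, 1, &ent);
 */
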
int kvm_set_signal_mask(kvm_context_t kvm, int vcpu, const sigset_t *sigset)
{
        struct kvm_signal_mask *sigmask;
        int r;

        if (!sigset) {
                r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_SIGNAL_MASK, NULL);
                if (r == -1)
                        r = -errno;
                return r;
        }
        sigmask = malloc(sizeof(*sigmask) + sizeof(*sigset));
        if (!sigmask)
                return -ENOMEM;

        sigmask->len = 8;       /* kernel sigset size, not sizeof(sigset_t) */
        memcpy(sigmask->sigset, sigset, sizeof(*sigset));
        r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_SIGNAL_MASK, sigmask);
        if (r == -1)
                r = -errno;
        free(sigmask);
        return r;
}

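/*
 * Illustrative sketch (not from this file): KVM_SET_SIGNAL_MASK installs
 * the mask that is in effect only while KVM_RUN executes.  The usual
 * pattern blocks a signal in the thread and unblocks it just for the
 * guest, so it can kick the vcpu out of KVM_RUN without racing user code:
 *
 *      sigset_t block, run_mask;
 *      sigemptyset(&block);
 *      sigaddset(&block, SIGUSR1);
 *      pthread_sigmask(SIG_BLOCK, &block, NULL);   // blocked outside KVM_RUN
 *      pthread_sigmask(SIG_BLOCK, NULL, &run_mask);
 *      sigdelset(&run_mask, SIGUSR1);              // deliverable inside
 *      kvm_set_signal_mask(kvm, 0, &run_mask);
 */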