kvm: libkvm: drop unnecessary include
[qemu-kvm/fedora.git] / kvm / libkvm / libkvm-x86.c
blob 2fc4fcee80befd28137673b1c263e30eae610eae
#include "libkvm.h"
#include "kvm-x86.h"
#include <errno.h>
#include <sys/ioctl.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <stdio.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdlib.h>
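
/*
 * Program the guest-physical address of the three-page region that VMX
 * uses for real-mode TSS emulation.  Returns 0 on success, -errno on
 * failure, or -ENOSYS when KVM_CAP_SET_TSS_ADDR is not available.
 */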
int kvm_set_tss_addr(kvm_context_t kvm, unsigned long addr)
{
#ifdef KVM_CAP_SET_TSS_ADDR
    int r;

    r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
    if (r > 0) {
        r = ioctl(kvm->vm_fd, KVM_SET_TSS_ADDR, addr);
        if (r == -1) {
            fprintf(stderr, "kvm_set_tss_addr: %m\n");
            return -errno;
        }
        return 0;
    }
#endif
    return -ENOSYS;
}
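
/*
 * Pick a default TSS address just below the BIOS when the kernel
 * supports KVM_CAP_SET_TSS_ADDR.
 */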
static int kvm_init_tss(kvm_context_t kvm)
{
#ifdef KVM_CAP_SET_TSS_ADDR
    int r;

    r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
    if (r > 0) {
        /*
         * This address is 3 pages before the BIOS, and the BIOS should
         * present it as unavailable memory.
         */
        r = kvm_set_tss_addr(kvm, 0xfffbd000);
        if (r < 0) {
            fprintf(stderr, "kvm_init_tss: unable to set tss addr\n");
            return r;
        }
    }
#endif
    return 0;
}
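
/*
 * Create the in-kernel PIT (i8254) if KVM_CAP_PIT is advertised and PIT
 * creation has not been disabled; record whether it ended up in the kernel.
 */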
int kvm_create_pit(kvm_context_t kvm)
{
#ifdef KVM_CAP_PIT
    int r;

    kvm->pit_in_kernel = 0;
    if (!kvm->no_pit_creation) {
        r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_PIT);
        if (r > 0) {
            r = ioctl(kvm->vm_fd, KVM_CREATE_PIT);
            if (r >= 0)
                kvm->pit_in_kernel = 1;
            else {
                fprintf(stderr, "Create kernel PIT failed\n");
                return r;
            }
        }
    }
#endif
    return 0;
}
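
/* x86-specific VM setup: TSS address, in-kernel PIT, coalesced MMIO. */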
int kvm_arch_create(kvm_context_t kvm, unsigned long phys_mem_bytes,
                    void **vm_mem)
{
    int r = 0;

    r = kvm_init_tss(kvm);
    if (r < 0)
        return r;

    r = kvm_create_pit(kvm);
    if (r < 0)
        return r;

    r = kvm_init_coalesced_mmio(kvm);
    if (r < 0)
        return r;

    return 0;
}
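
/*
 * TPR access reporting and the virtual APIC page: forward TPR-access
 * exits to the user-supplied callback and register the vAPIC page
 * address with the kernel.
 */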
#ifdef KVM_EXIT_TPR_ACCESS

static int handle_tpr_access(kvm_context_t kvm, struct kvm_run *run, int vcpu)
{
    return kvm->callbacks->tpr_access(kvm->opaque, vcpu,
                                      run->tpr_access.rip,
                                      run->tpr_access.is_write);
}

int kvm_enable_vapic(kvm_context_t kvm, int vcpu, uint64_t vapic)
{
    int r;
    struct kvm_vapic_addr va = {
        .vapic_addr = vapic,
    };

    r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_VAPIC_ADDR, &va);
    if (r == -1) {
        r = -errno;
        perror("kvm_enable_vapic");
        return r;
    }
    return 0;
}

#endif
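
/*
 * Handle x86-specific exit reasons.  Returns 1 for exit reasons not
 * handled here so the generic run loop can process them.
 */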
int kvm_arch_run(struct kvm_run *run, kvm_context_t kvm, int vcpu)
{
    int r = 0;

    switch (run->exit_reason) {
#ifdef KVM_EXIT_SET_TPR
    case KVM_EXIT_SET_TPR:
        break;
#endif
#ifdef KVM_EXIT_TPR_ACCESS
    case KVM_EXIT_TPR_ACCESS:
        r = handle_tpr_access(kvm, run, vcpu);
        break;
#endif
    default:
        r = 1;
        break;
    }

    return r;
}
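
/*
 * Bookkeeping for memory aliases: libkvm remembers up to MAX_ALIAS_SLOTS
 * aliases so an existing alias for a start address can be reused.
 */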
#define MAX_ALIAS_SLOTS 4
static struct {
    uint64_t start;
    uint64_t len;
} kvm_aliases[MAX_ALIAS_SLOTS];

static int get_alias_slot(uint64_t start)
{
    int i;

    for (i = 0; i < MAX_ALIAS_SLOTS; i++)
        if (kvm_aliases[i].start == start)
            return i;
    return -1;
}

static int get_free_alias_slot(void)
{
    int i;

    for (i = 0; i < MAX_ALIAS_SLOTS; i++)
        if (kvm_aliases[i].len == 0)
            return i;
    return -1;
}

static void register_alias(int slot, uint64_t start, uint64_t len)
{
    kvm_aliases[slot].start = start;
    kvm_aliases[slot].len = len;
}

int kvm_create_memory_alias(kvm_context_t kvm,
                            uint64_t phys_start,
                            uint64_t len,
                            uint64_t target_phys)
{
    struct kvm_memory_alias alias = {
        .flags = 0,
        .guest_phys_addr = phys_start,
        .memory_size = len,
        .target_phys_addr = target_phys,
    };
    int fd = kvm->vm_fd;
    int r;
    int slot;

    slot = get_alias_slot(phys_start);
    if (slot < 0)
        slot = get_free_alias_slot();
    if (slot < 0)
        return -EBUSY;
    alias.slot = slot;

    r = ioctl(fd, KVM_SET_MEMORY_ALIAS, &alias);
    if (r == -1)
        return -errno;

    register_alias(slot, phys_start, len);
    return 0;
}

int kvm_destroy_memory_alias(kvm_context_t kvm, uint64_t phys_start)
{
    return kvm_create_memory_alias(kvm, phys_start, 0, 0);
}
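
/*
 * In-kernel local APIC state accessors; both are no-ops when the irqchip
 * is not emulated in the kernel.
 */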
#ifdef KVM_CAP_IRQCHIP

int kvm_get_lapic(kvm_context_t kvm, int vcpu, struct kvm_lapic_state *s)
{
    int r;
    if (!kvm->irqchip_in_kernel)
        return 0;
    r = ioctl(kvm->vcpu_fd[vcpu], KVM_GET_LAPIC, s);
    if (r == -1) {
        r = -errno;
        perror("kvm_get_lapic");
    }
    return r;
}

int kvm_set_lapic(kvm_context_t kvm, int vcpu, struct kvm_lapic_state *s)
{
    int r;
    if (!kvm->irqchip_in_kernel)
        return 0;
    r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_LAPIC, s);
    if (r == -1) {
        r = -errno;
        perror("kvm_set_lapic");
    }
    return r;
}

#endif
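
/*
 * In-kernel PIT state accessors; both are no-ops when the PIT is not
 * emulated in the kernel.
 */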
#ifdef KVM_CAP_PIT

int kvm_get_pit(kvm_context_t kvm, struct kvm_pit_state *s)
{
    int r;
    if (!kvm->pit_in_kernel)
        return 0;
    r = ioctl(kvm->vm_fd, KVM_GET_PIT, s);
    if (r == -1) {
        r = -errno;
        perror("kvm_get_pit");
    }
    return r;
}

int kvm_set_pit(kvm_context_t kvm, struct kvm_pit_state *s)
{
    int r;
    if (!kvm->pit_in_kernel)
        return 0;
    r = ioctl(kvm->vm_fd, KVM_SET_PIT, s);
    if (r == -1) {
        r = -errno;
        perror("kvm_set_pit");
    }
    return r;
}

#endif
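
/*
 * Dump up to SHOW_CODE_LEN bytes of guest code around the current rip to
 * stderr, marking the current instruction with "-->".  Bytes that cannot
 * be read through the mmio_read callback are shown as "xx".
 */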
void kvm_show_code(kvm_context_t kvm, int vcpu)
{
#define SHOW_CODE_LEN 50
    int fd = kvm->vcpu_fd[vcpu];
    struct kvm_regs regs;
    struct kvm_sregs sregs;
    int r, n;
    int back_offset;
    unsigned char code;
    char code_str[SHOW_CODE_LEN * 3 + 1];
    unsigned long rip;

    r = ioctl(fd, KVM_GET_SREGS, &sregs);
    if (r == -1) {
        perror("KVM_GET_SREGS");
        return;
    }
    r = ioctl(fd, KVM_GET_REGS, &regs);
    if (r == -1) {
        perror("KVM_GET_REGS");
        return;
    }
    rip = sregs.cs.base + regs.rip;
    back_offset = regs.rip;
    if (back_offset > 20)
        back_offset = 20;
    *code_str = 0;
    for (n = -back_offset; n < SHOW_CODE_LEN - back_offset; ++n) {
        if (n == 0)
            strcat(code_str, " -->");
        r = kvm->callbacks->mmio_read(kvm->opaque, rip + n, &code, 1);
        if (r < 0) {
            strcat(code_str, " xx");
            continue;
        }
        sprintf(code_str + strlen(code_str), " %02x", code);
    }
    fprintf(stderr, "code:%s\n", code_str);
}

/*
 * Returns available msr list.  User must free.
 */
struct kvm_msr_list *kvm_get_msr_list(kvm_context_t kvm)
{
    struct kvm_msr_list sizer, *msrs;
    int r, e;

    sizer.nmsrs = 0;
    r = ioctl(kvm->fd, KVM_GET_MSR_INDEX_LIST, &sizer);
    if (r == -1 && errno != E2BIG)
        return NULL;
    msrs = malloc(sizeof *msrs + sizer.nmsrs * sizeof *msrs->indices);
    if (!msrs) {
        errno = ENOMEM;
        return NULL;
    }
    msrs->nmsrs = sizer.nmsrs;
    r = ioctl(kvm->fd, KVM_GET_MSR_INDEX_LIST, msrs);
    if (r == -1) {
        e = errno;
        free(msrs);
        errno = e;
        return NULL;
    }
    return msrs;
}
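
/*
 * kvm_get_msrs/kvm_set_msrs copy the caller's MSR array into a
 * variable-sized struct kvm_msrs, issue the vcpu ioctl, and preserve
 * errno across the cleanup.  The ioctl result is returned directly.
 */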
int kvm_get_msrs(kvm_context_t kvm, int vcpu, struct kvm_msr_entry *msrs,
                 int n)
{
    struct kvm_msrs *kmsrs = malloc(sizeof *kmsrs + n * sizeof *msrs);
    int r, e;

    if (!kmsrs) {
        errno = ENOMEM;
        return -1;
    }
    kmsrs->nmsrs = n;
    memcpy(kmsrs->entries, msrs, n * sizeof *msrs);
    r = ioctl(kvm->vcpu_fd[vcpu], KVM_GET_MSRS, kmsrs);
    e = errno;
    memcpy(msrs, kmsrs->entries, n * sizeof *msrs);
    free(kmsrs);
    errno = e;
    return r;
}

int kvm_set_msrs(kvm_context_t kvm, int vcpu, struct kvm_msr_entry *msrs,
                 int n)
{
    struct kvm_msrs *kmsrs = malloc(sizeof *kmsrs + n * sizeof *msrs);
    int r, e;

    if (!kmsrs) {
        errno = ENOMEM;
        return -1;
    }
    kmsrs->nmsrs = n;
    memcpy(kmsrs->entries, msrs, n * sizeof *msrs);
    r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_MSRS, kmsrs);
    e = errno;
    free(kmsrs);
    errno = e;
    return r;
}
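
/*
 * Debug helpers: dump the general-purpose registers, segment registers,
 * descriptor tables and control registers of a vcpu to stderr.
 */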
static void print_seg(FILE *file, const char *name, struct kvm_segment *seg)
{
    fprintf(file,
            "%s %04x (%08llx/%08x p %d dpl %d db %d s %d type %x l %d"
            " g %d avl %d)\n",
            name, seg->selector, seg->base, seg->limit, seg->present,
            seg->dpl, seg->db, seg->s, seg->type, seg->l, seg->g,
            seg->avl);
}

static void print_dt(FILE *file, const char *name, struct kvm_dtable *dt)
{
    fprintf(file, "%s %llx/%x\n", name, dt->base, dt->limit);
}

void kvm_show_regs(kvm_context_t kvm, int vcpu)
{
    int fd = kvm->vcpu_fd[vcpu];
    struct kvm_regs regs;
    struct kvm_sregs sregs;
    int r;

    r = ioctl(fd, KVM_GET_REGS, &regs);
    if (r == -1) {
        perror("KVM_GET_REGS");
        return;
    }
    fprintf(stderr,
            "rax %016llx rbx %016llx rcx %016llx rdx %016llx\n"
            "rsi %016llx rdi %016llx rsp %016llx rbp %016llx\n"
            "r8  %016llx r9  %016llx r10 %016llx r11 %016llx\n"
            "r12 %016llx r13 %016llx r14 %016llx r15 %016llx\n"
            "rip %016llx rflags %08llx\n",
            regs.rax, regs.rbx, regs.rcx, regs.rdx,
            regs.rsi, regs.rdi, regs.rsp, regs.rbp,
            regs.r8, regs.r9, regs.r10, regs.r11,
            regs.r12, regs.r13, regs.r14, regs.r15,
            regs.rip, regs.rflags);
    r = ioctl(fd, KVM_GET_SREGS, &sregs);
    if (r == -1) {
        perror("KVM_GET_SREGS");
        return;
    }
    print_seg(stderr, "cs", &sregs.cs);
    print_seg(stderr, "ds", &sregs.ds);
    print_seg(stderr, "es", &sregs.es);
    print_seg(stderr, "ss", &sregs.ss);
    print_seg(stderr, "fs", &sregs.fs);
    print_seg(stderr, "gs", &sregs.gs);
    print_seg(stderr, "tr", &sregs.tr);
    print_seg(stderr, "ldt", &sregs.ldt);
    print_dt(stderr, "gdt", &sregs.gdt);
    print_dt(stderr, "idt", &sregs.idt);
    fprintf(stderr, "cr0 %llx cr2 %llx cr3 %llx cr4 %llx cr8 %llx"
            " efer %llx\n",
            sregs.cr0, sregs.cr2, sregs.cr3, sregs.cr4, sregs.cr8,
            sregs.efer);
}
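
/*
 * APIC base and CR8 are exchanged with the kernel through the shared
 * kvm_run mapping rather than through ioctls.
 */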
uint64_t kvm_get_apic_base(kvm_context_t kvm, int vcpu)
{
    struct kvm_run *run = kvm->run[vcpu];
    return run->apic_base;
}

void kvm_set_cr8(kvm_context_t kvm, int vcpu, uint64_t cr8)
{
    struct kvm_run *run = kvm->run[vcpu];
    run->cr8 = cr8;
}

__u64 kvm_get_cr8(kvm_context_t kvm, int vcpu)
{
    return kvm->run[vcpu]->cr8;
}
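
/*
 * Copy the caller's CPUID entries into a variable-sized kernel structure
 * and hand them to the vcpu (KVM_SET_CPUID / KVM_SET_CPUID2).
 */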
int kvm_setup_cpuid(kvm_context_t kvm, int vcpu, int nent,
                    struct kvm_cpuid_entry *entries)
{
    struct kvm_cpuid *cpuid;
    int r;

    cpuid = malloc(sizeof(*cpuid) + nent * sizeof(*entries));
    if (!cpuid)
        return -ENOMEM;

    cpuid->nent = nent;
    memcpy(cpuid->entries, entries, nent * sizeof(*entries));
    r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_CPUID, cpuid);

    free(cpuid);
    return r;
}

int kvm_setup_cpuid2(kvm_context_t kvm, int vcpu, int nent,
                     struct kvm_cpuid_entry2 *entries)
{
    struct kvm_cpuid2 *cpuid;
    int r;

    cpuid = malloc(sizeof(*cpuid) + nent * sizeof(*entries));
    if (!cpuid)
        return -ENOMEM;

    cpuid->nent = nent;
    memcpy(cpuid->entries, entries, nent * sizeof(*entries));
    r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_CPUID2, cpuid);
    if (r == -1) {
        r = -errno;
        fprintf(stderr, "kvm_setup_cpuid2: %m\n");
    }
    free(cpuid);
    return r;
}
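
/*
 * Shadow MMU page cache control (KVM_CAP_MMU_SHADOW_CACHE_CONTROL):
 * set or query the number of shadow pages the kernel keeps for this VM.
 */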
int kvm_set_shadow_pages(kvm_context_t kvm, unsigned int nrshadow_pages)
{
#ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
    int r;

    r = ioctl(kvm->fd, KVM_CHECK_EXTENSION,
              KVM_CAP_MMU_SHADOW_CACHE_CONTROL);
    if (r > 0) {
        r = ioctl(kvm->vm_fd, KVM_SET_NR_MMU_PAGES, nrshadow_pages);
        if (r == -1) {
            fprintf(stderr, "kvm_set_shadow_pages: %m\n");
            return -errno;
        }
        return 0;
    }
#endif
    return -1;
}

int kvm_get_shadow_pages(kvm_context_t kvm, unsigned int *nrshadow_pages)
{
#ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
    int r;

    r = ioctl(kvm->fd, KVM_CHECK_EXTENSION,
              KVM_CAP_MMU_SHADOW_CACHE_CONTROL);
    if (r > 0) {
        *nrshadow_pages = ioctl(kvm->vm_fd, KVM_GET_NR_MMU_PAGES);
        return 0;
    }
#endif
    return -1;
}
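
/*
 * Enable or disable TPR-access reporting for a vcpu when the kernel
 * advertises KVM_CAP_VAPIC.
 */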
#ifdef KVM_CAP_VAPIC

static int tpr_access_reporting(kvm_context_t kvm, int vcpu, int enabled)
{
    int r;
    struct kvm_tpr_access_ctl tac = {
        .enabled = enabled,
    };

    r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_VAPIC);
    if (r == -1 || r == 0)
        return -ENOSYS;
    r = ioctl(kvm->vcpu_fd[vcpu], KVM_TPR_ACCESS_REPORTING, &tac);
    if (r == -1) {
        r = -errno;
        perror("KVM_TPR_ACCESS_REPORTING");
        return r;
    }
    return 0;
}

int kvm_enable_tpr_access_reporting(kvm_context_t kvm, int vcpu)
{
    return tpr_access_reporting(kvm, vcpu, 1);
}

int kvm_disable_tpr_access_reporting(kvm_context_t kvm, int vcpu)
{
    return tpr_access_reporting(kvm, vcpu, 0);
}

#endif