ac97: IOMMU support
[qemu-kvm/amd-iommu.git] / kvm / libkvm / libkvm-x86.c
blobf1aef7617a035bc02446ca1477da83edddf92cb3
1 #include "libkvm.h"
2 #include "kvm-x86.h"
3 #include <errno.h>
4 #include <sys/ioctl.h>
5 #include <string.h>
6 #include <unistd.h>
7 #include <sys/mman.h>
8 #include <stdio.h>
9 #include <errno.h>
10 #include <sys/types.h>
11 #include <sys/stat.h>
12 #include <fcntl.h>
13 #include <stdlib.h>
15 int kvm_set_tss_addr(kvm_context_t kvm, unsigned long addr)
17 #ifdef KVM_CAP_SET_TSS_ADDR
18 int r;
20 r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
21 if (r > 0) {
22 r = ioctl(kvm->vm_fd, KVM_SET_TSS_ADDR, addr);
23 if (r == -1) {
24 fprintf(stderr, "kvm_set_tss_addr: %m\n");
25 return -errno;
27 return 0;
29 #endif
30 return -ENOSYS;
33 static int kvm_init_tss(kvm_context_t kvm)
35 #ifdef KVM_CAP_SET_TSS_ADDR
36 int r;
38 r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
39 if (r > 0) {
41 * this address is 3 pages before the bios, and the bios should present
42 * as unavaible memory
44 r = kvm_set_tss_addr(kvm, 0xfffbd000);
45 if (r < 0) {
46 fprintf(stderr, "kvm_init_tss: unable to set tss addr\n");
47 return r;
51 #endif
52 return 0;
55 static int kvm_create_pit(kvm_context_t kvm)
57 #ifdef KVM_CAP_PIT
58 int r;
60 kvm->pit_in_kernel = 0;
61 if (!kvm->no_pit_creation) {
62 #ifdef KVM_CAP_PIT2
63 struct kvm_pit_config config = { .flags = 0 };
65 r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_PIT2);
66 if (r > 0)
67 r = ioctl(kvm->vm_fd, KVM_CREATE_PIT2, &config);
68 else
69 #endif
71 r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_PIT);
72 if (r <= 0)
73 return 0;
75 r = ioctl(kvm->vm_fd, KVM_CREATE_PIT);
77 if (r < 0) {
78 fprintf(stderr, "Create kernel PIC irqchip failed\n");
79 return r;
81 kvm->pit_in_kernel = 1;
83 #endif
84 return 0;
87 int kvm_arch_create(kvm_context_t kvm, unsigned long phys_mem_bytes,
88 void **vm_mem)
90 int r = 0;
92 r = kvm_init_tss(kvm);
93 if (r < 0)
94 return r;
96 r = kvm_create_pit(kvm);
97 if (r < 0)
98 return r;
100 r = kvm_init_coalesced_mmio(kvm);
101 if (r < 0)
102 return r;
104 return 0;
107 #ifdef KVM_EXIT_TPR_ACCESS
109 static int handle_tpr_access(kvm_context_t kvm, struct kvm_run *run, int vcpu)
111 return kvm->callbacks->tpr_access(kvm->opaque, vcpu,
112 run->tpr_access.rip,
113 run->tpr_access.is_write);
117 int kvm_enable_vapic(kvm_context_t kvm, int vcpu, uint64_t vapic)
119 int r;
120 struct kvm_vapic_addr va = {
121 .vapic_addr = vapic,
124 r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_VAPIC_ADDR, &va);
125 if (r == -1) {
126 r = -errno;
127 perror("kvm_enable_vapic");
128 return r;
130 return 0;
133 #endif
135 int kvm_arch_run(struct kvm_run *run,kvm_context_t kvm, int vcpu)
137 int r = 0;
139 switch (run->exit_reason) {
140 #ifdef KVM_EXIT_SET_TPR
141 case KVM_EXIT_SET_TPR:
142 break;
143 #endif
144 #ifdef KVM_EXIT_TPR_ACCESS
145 case KVM_EXIT_TPR_ACCESS:
146 r = handle_tpr_access(kvm, run, vcpu);
147 break;
148 #endif
149 default:
150 r = 1;
151 break;
154 return r;
/*
 * Bookkeeping for guest-physical memory aliases, mirroring the kernel's
 * alias slots so a slot can be found again by its start address and
 * reused or released.
 */
#define MAX_ALIAS_SLOTS 4
static struct {
	uint64_t start;	/* guest-physical start address of the alias */
	uint64_t len;	/* alias length in bytes; 0 marks a free slot */
} kvm_aliases[MAX_ALIAS_SLOTS];
163 static int get_alias_slot(uint64_t start)
165 int i;
167 for (i=0; i<MAX_ALIAS_SLOTS; i++)
168 if (kvm_aliases[i].start == start)
169 return i;
170 return -1;
172 static int get_free_alias_slot(void)
174 int i;
176 for (i=0; i<MAX_ALIAS_SLOTS; i++)
177 if (kvm_aliases[i].len == 0)
178 return i;
179 return -1;
182 static void register_alias(int slot, uint64_t start, uint64_t len)
184 kvm_aliases[slot].start = start;
185 kvm_aliases[slot].len = len;
188 int kvm_create_memory_alias(kvm_context_t kvm,
189 uint64_t phys_start,
190 uint64_t len,
191 uint64_t target_phys)
193 struct kvm_memory_alias alias = {
194 .flags = 0,
195 .guest_phys_addr = phys_start,
196 .memory_size = len,
197 .target_phys_addr = target_phys,
199 int fd = kvm->vm_fd;
200 int r;
201 int slot;
203 slot = get_alias_slot(phys_start);
204 if (slot < 0)
205 slot = get_free_alias_slot();
206 if (slot < 0)
207 return -EBUSY;
208 alias.slot = slot;
210 r = ioctl(fd, KVM_SET_MEMORY_ALIAS, &alias);
211 if (r == -1)
212 return -errno;
214 register_alias(slot, phys_start, len);
215 return 0;
/*
 * Remove the alias at @phys_start: installing a zero-length alias both
 * clears it in the kernel and frees the local bookkeeping slot.
 */
int kvm_destroy_memory_alias(kvm_context_t kvm, uint64_t phys_start)
{
	return kvm_create_memory_alias(kvm, phys_start, 0, 0);
}
223 #ifdef KVM_CAP_IRQCHIP
225 int kvm_get_lapic(kvm_context_t kvm, int vcpu, struct kvm_lapic_state *s)
227 int r;
228 if (!kvm->irqchip_in_kernel)
229 return 0;
230 r = ioctl(kvm->vcpu_fd[vcpu], KVM_GET_LAPIC, s);
231 if (r == -1) {
232 r = -errno;
233 perror("kvm_get_lapic");
235 return r;
238 int kvm_set_lapic(kvm_context_t kvm, int vcpu, struct kvm_lapic_state *s)
240 int r;
241 if (!kvm->irqchip_in_kernel)
242 return 0;
243 r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_LAPIC, s);
244 if (r == -1) {
245 r = -errno;
246 perror("kvm_set_lapic");
248 return r;
251 #endif
253 #ifdef KVM_CAP_PIT
255 int kvm_get_pit(kvm_context_t kvm, struct kvm_pit_state *s)
257 int r;
258 if (!kvm->pit_in_kernel)
259 return 0;
260 r = ioctl(kvm->vm_fd, KVM_GET_PIT, s);
261 if (r == -1) {
262 r = -errno;
263 perror("kvm_get_pit");
265 return r;
268 int kvm_set_pit(kvm_context_t kvm, struct kvm_pit_state *s)
270 int r;
271 if (!kvm->pit_in_kernel)
272 return 0;
273 r = ioctl(kvm->vm_fd, KVM_SET_PIT, s);
274 if (r == -1) {
275 r = -errno;
276 perror("kvm_set_pit");
278 return r;
281 #endif
283 void kvm_show_code(kvm_context_t kvm, int vcpu)
285 #define SHOW_CODE_LEN 50
286 int fd = kvm->vcpu_fd[vcpu];
287 struct kvm_regs regs;
288 struct kvm_sregs sregs;
289 int r, n;
290 int back_offset;
291 unsigned char code;
292 char code_str[SHOW_CODE_LEN * 3 + 1];
293 unsigned long rip;
295 r = ioctl(fd, KVM_GET_SREGS, &sregs);
296 if (r == -1) {
297 perror("KVM_GET_SREGS");
298 return;
300 r = ioctl(fd, KVM_GET_REGS, &regs);
301 if (r == -1) {
302 perror("KVM_GET_REGS");
303 return;
305 rip = sregs.cs.base + regs.rip;
306 back_offset = regs.rip;
307 if (back_offset > 20)
308 back_offset = 20;
309 *code_str = 0;
310 for (n = -back_offset; n < SHOW_CODE_LEN-back_offset; ++n) {
311 if (n == 0)
312 strcat(code_str, " -->");
313 r = kvm->callbacks->mmio_read(kvm->opaque, rip + n, &code, 1);
314 if (r < 0) {
315 strcat(code_str, " xx");
316 continue;
318 sprintf(code_str + strlen(code_str), " %02x", code);
320 fprintf(stderr, "code:%s\n", code_str);
325 * Returns available msr list. User must free.
327 struct kvm_msr_list *kvm_get_msr_list(kvm_context_t kvm)
329 struct kvm_msr_list sizer, *msrs;
330 int r, e;
332 sizer.nmsrs = 0;
333 r = ioctl(kvm->fd, KVM_GET_MSR_INDEX_LIST, &sizer);
334 if (r == -1 && errno != E2BIG)
335 return NULL;
336 msrs = malloc(sizeof *msrs + sizer.nmsrs * sizeof *msrs->indices);
337 if (!msrs) {
338 errno = ENOMEM;
339 return NULL;
341 msrs->nmsrs = sizer.nmsrs;
342 r = ioctl(kvm->fd, KVM_GET_MSR_INDEX_LIST, msrs);
343 if (r == -1) {
344 e = errno;
345 free(msrs);
346 errno = e;
347 return NULL;
349 return msrs;
352 int kvm_get_msrs(kvm_context_t kvm, int vcpu, struct kvm_msr_entry *msrs,
353 int n)
355 struct kvm_msrs *kmsrs = malloc(sizeof *kmsrs + n * sizeof *msrs);
356 int r, e;
358 if (!kmsrs) {
359 errno = ENOMEM;
360 return -1;
362 kmsrs->nmsrs = n;
363 memcpy(kmsrs->entries, msrs, n * sizeof *msrs);
364 r = ioctl(kvm->vcpu_fd[vcpu], KVM_GET_MSRS, kmsrs);
365 e = errno;
366 memcpy(msrs, kmsrs->entries, n * sizeof *msrs);
367 free(kmsrs);
368 errno = e;
369 return r;
372 int kvm_set_msrs(kvm_context_t kvm, int vcpu, struct kvm_msr_entry *msrs,
373 int n)
375 struct kvm_msrs *kmsrs = malloc(sizeof *kmsrs + n * sizeof *msrs);
376 int r, e;
378 if (!kmsrs) {
379 errno = ENOMEM;
380 return -1;
382 kmsrs->nmsrs = n;
383 memcpy(kmsrs->entries, msrs, n * sizeof *msrs);
384 r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_MSRS, kmsrs);
385 e = errno;
386 free(kmsrs);
387 errno = e;
388 return r;
391 static void print_seg(FILE *file, const char *name, struct kvm_segment *seg)
393 fprintf(stderr,
394 "%s %04x (%08llx/%08x p %d dpl %d db %d s %d type %x l %d"
395 " g %d avl %d)\n",
396 name, seg->selector, seg->base, seg->limit, seg->present,
397 seg->dpl, seg->db, seg->s, seg->type, seg->l, seg->g,
398 seg->avl);
401 static void print_dt(FILE *file, const char *name, struct kvm_dtable *dt)
403 fprintf(stderr, "%s %llx/%x\n", name, dt->base, dt->limit);
406 void kvm_show_regs(kvm_context_t kvm, int vcpu)
408 int fd = kvm->vcpu_fd[vcpu];
409 struct kvm_regs regs;
410 struct kvm_sregs sregs;
411 int r;
413 r = ioctl(fd, KVM_GET_REGS, &regs);
414 if (r == -1) {
415 perror("KVM_GET_REGS");
416 return;
418 fprintf(stderr,
419 "rax %016llx rbx %016llx rcx %016llx rdx %016llx\n"
420 "rsi %016llx rdi %016llx rsp %016llx rbp %016llx\n"
421 "r8 %016llx r9 %016llx r10 %016llx r11 %016llx\n"
422 "r12 %016llx r13 %016llx r14 %016llx r15 %016llx\n"
423 "rip %016llx rflags %08llx\n",
424 regs.rax, regs.rbx, regs.rcx, regs.rdx,
425 regs.rsi, regs.rdi, regs.rsp, regs.rbp,
426 regs.r8, regs.r9, regs.r10, regs.r11,
427 regs.r12, regs.r13, regs.r14, regs.r15,
428 regs.rip, regs.rflags);
429 r = ioctl(fd, KVM_GET_SREGS, &sregs);
430 if (r == -1) {
431 perror("KVM_GET_SREGS");
432 return;
434 print_seg(stderr, "cs", &sregs.cs);
435 print_seg(stderr, "ds", &sregs.ds);
436 print_seg(stderr, "es", &sregs.es);
437 print_seg(stderr, "ss", &sregs.ss);
438 print_seg(stderr, "fs", &sregs.fs);
439 print_seg(stderr, "gs", &sregs.gs);
440 print_seg(stderr, "tr", &sregs.tr);
441 print_seg(stderr, "ldt", &sregs.ldt);
442 print_dt(stderr, "gdt", &sregs.gdt);
443 print_dt(stderr, "idt", &sregs.idt);
444 fprintf(stderr, "cr0 %llx cr2 %llx cr3 %llx cr4 %llx cr8 %llx"
445 " efer %llx\n",
446 sregs.cr0, sregs.cr2, sregs.cr3, sregs.cr4, sregs.cr8,
447 sregs.efer);
450 uint64_t kvm_get_apic_base(kvm_context_t kvm, int vcpu)
452 struct kvm_run *run = kvm->run[vcpu];
454 return run->apic_base;
457 void kvm_set_cr8(kvm_context_t kvm, int vcpu, uint64_t cr8)
459 struct kvm_run *run = kvm->run[vcpu];
461 run->cr8 = cr8;
464 __u64 kvm_get_cr8(kvm_context_t kvm, int vcpu)
466 return kvm->run[vcpu]->cr8;
469 int kvm_setup_cpuid(kvm_context_t kvm, int vcpu, int nent,
470 struct kvm_cpuid_entry *entries)
472 struct kvm_cpuid *cpuid;
473 int r;
475 cpuid = malloc(sizeof(*cpuid) + nent * sizeof(*entries));
476 if (!cpuid)
477 return -ENOMEM;
479 cpuid->nent = nent;
480 memcpy(cpuid->entries, entries, nent * sizeof(*entries));
481 r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_CPUID, cpuid);
483 free(cpuid);
484 return r;
487 int kvm_setup_cpuid2(kvm_context_t kvm, int vcpu, int nent,
488 struct kvm_cpuid_entry2 *entries)
490 struct kvm_cpuid2 *cpuid;
491 int r;
493 cpuid = malloc(sizeof(*cpuid) + nent * sizeof(*entries));
494 if (!cpuid)
495 return -ENOMEM;
497 cpuid->nent = nent;
498 memcpy(cpuid->entries, entries, nent * sizeof(*entries));
499 r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_CPUID2, cpuid);
500 if (r == -1) {
501 fprintf(stderr, "kvm_setup_cpuid2: %m\n");
502 r = -errno;
504 free(cpuid);
505 return r;
508 int kvm_set_shadow_pages(kvm_context_t kvm, unsigned int nrshadow_pages)
510 #ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
511 int r;
513 r = ioctl(kvm->fd, KVM_CHECK_EXTENSION,
514 KVM_CAP_MMU_SHADOW_CACHE_CONTROL);
515 if (r > 0) {
516 r = ioctl(kvm->vm_fd, KVM_SET_NR_MMU_PAGES, nrshadow_pages);
517 if (r == -1) {
518 fprintf(stderr, "kvm_set_shadow_pages: %m\n");
519 return -errno;
521 return 0;
523 #endif
524 return -1;
527 int kvm_get_shadow_pages(kvm_context_t kvm, unsigned int *nrshadow_pages)
529 #ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
530 int r;
532 r = ioctl(kvm->fd, KVM_CHECK_EXTENSION,
533 KVM_CAP_MMU_SHADOW_CACHE_CONTROL);
534 if (r > 0) {
535 *nrshadow_pages = ioctl(kvm->vm_fd, KVM_GET_NR_MMU_PAGES);
536 return 0;
538 #endif
539 return -1;
542 #ifdef KVM_CAP_VAPIC
544 static int tpr_access_reporting(kvm_context_t kvm, int vcpu, int enabled)
546 int r;
547 struct kvm_tpr_access_ctl tac = {
548 .enabled = enabled,
551 r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_VAPIC);
552 if (r == -1 || r == 0)
553 return -ENOSYS;
554 r = ioctl(kvm->vcpu_fd[vcpu], KVM_TPR_ACCESS_REPORTING, &tac);
555 if (r == -1) {
556 r = -errno;
557 perror("KVM_TPR_ACCESS_REPORTING");
558 return r;
560 return 0;
563 int kvm_enable_tpr_access_reporting(kvm_context_t kvm, int vcpu)
565 return tpr_access_reporting(kvm, vcpu, 1);
568 int kvm_disable_tpr_access_reporting(kvm_context_t kvm, int vcpu)
570 return tpr_access_reporting(kvm, vcpu, 0);
573 #endif
575 #ifdef KVM_CAP_EXT_CPUID
577 static struct kvm_cpuid2 *try_get_cpuid(kvm_context_t kvm, int max)
579 struct kvm_cpuid2 *cpuid;
580 int r, size;
582 size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
583 cpuid = (struct kvm_cpuid2 *)malloc(size);
584 cpuid->nent = max;
585 r = ioctl(kvm->fd, KVM_GET_SUPPORTED_CPUID, cpuid);
586 if (r == -1)
587 r = -errno;
588 else if (r == 0 && cpuid->nent >= max)
589 r = -E2BIG;
590 if (r < 0) {
591 if (r == -E2BIG) {
592 free(cpuid);
593 return NULL;
594 } else {
595 fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
596 strerror(-r));
597 exit(1);
600 return cpuid;
/*
 * General-purpose register indices used for the `reg` argument of
 * kvm_get_supported_cpuid() below.
 */
#define R_EAX 0
#define R_ECX 1
#define R_EDX 2
#define R_EBX 3
#define R_ESP 4
#define R_EBP 5
#define R_ESI 6
#define R_EDI 7
612 uint32_t kvm_get_supported_cpuid(kvm_context_t kvm, uint32_t function, int reg)
614 struct kvm_cpuid2 *cpuid;
615 int i, max;
616 uint32_t ret = 0;
617 uint32_t cpuid_1_edx;
619 if (!kvm_check_extension(kvm, KVM_CAP_EXT_CPUID)) {
620 return -1U;
623 max = 1;
624 while ((cpuid = try_get_cpuid(kvm, max)) == NULL) {
625 max *= 2;
628 for (i = 0; i < cpuid->nent; ++i) {
629 if (cpuid->entries[i].function == function) {
630 switch (reg) {
631 case R_EAX:
632 ret = cpuid->entries[i].eax;
633 break;
634 case R_EBX:
635 ret = cpuid->entries[i].ebx;
636 break;
637 case R_ECX:
638 ret = cpuid->entries[i].ecx;
639 break;
640 case R_EDX:
641 ret = cpuid->entries[i].edx;
642 if (function == 1) {
643 /* kvm misreports the following features
645 ret |= 1 << 12; /* MTRR */
646 ret |= 1 << 16; /* PAT */
647 ret |= 1 << 7; /* MCE */
648 ret |= 1 << 14; /* MCA */
651 /* On Intel, kvm returns cpuid according to
652 * the Intel spec, so add missing bits
653 * according to the AMD spec:
655 if (function == 0x80000001) {
656 cpuid_1_edx = kvm_get_supported_cpuid(kvm, 1, R_EDX);
657 ret |= cpuid_1_edx & 0xdfeff7ff;
659 break;
664 free(cpuid);
666 return ret;
669 #else
/*
 * Fallback when KVM_CAP_EXT_CPUID is not compiled in: no supported-CPUID
 * information is available, so report all bits set, matching the
 * capability-missing return of the real implementation above.
 */
uint32_t kvm_get_supported_cpuid(kvm_context_t kvm, uint32_t function, int reg)
{
	return -1U;
}
676 #endif