10 #include <sys/types.h>
15 int kvm_set_tss_addr(kvm_context_t kvm
, unsigned long addr
)
17 #ifdef KVM_CAP_SET_TSS_ADDR
20 r
= ioctl(kvm
->fd
, KVM_CHECK_EXTENSION
, KVM_CAP_SET_TSS_ADDR
);
22 r
= ioctl(kvm
->vm_fd
, KVM_SET_TSS_ADDR
, addr
);
24 fprintf(stderr
, "kvm_set_tss_addr: %m\n");
33 static int kvm_init_tss(kvm_context_t kvm
)
35 #ifdef KVM_CAP_SET_TSS_ADDR
38 r
= ioctl(kvm
->fd
, KVM_CHECK_EXTENSION
, KVM_CAP_SET_TSS_ADDR
);
41 * this address is 3 pages before the bios, and the bios should present
44 r
= kvm_set_tss_addr(kvm
, 0xfffbd000);
46 fprintf(stderr
, "kvm_init_tss: unable to set tss addr\n");
55 int kvm_create_pit(kvm_context_t kvm
)
60 kvm
->pit_in_kernel
= 0;
61 if (!kvm
->no_pit_creation
) {
62 r
= ioctl(kvm
->fd
, KVM_CHECK_EXTENSION
, KVM_CAP_PIT
);
64 r
= ioctl(kvm
->vm_fd
, KVM_CREATE_PIT
);
66 kvm
->pit_in_kernel
= 1;
68 fprintf(stderr
, "Create kernel PIC irqchip failed\n");
77 int kvm_arch_create(kvm_context_t kvm
, unsigned long phys_mem_bytes
,
82 r
= kvm_init_tss(kvm
);
86 r
= kvm_create_pit(kvm
);
90 r
= kvm_init_coalesced_mmio(kvm
);
97 #ifdef KVM_EXIT_TPR_ACCESS
99 static int handle_tpr_access(kvm_context_t kvm
, struct kvm_run
*run
, int vcpu
)
101 return kvm
->callbacks
->tpr_access(kvm
->opaque
, vcpu
,
103 run
->tpr_access
.is_write
);
107 int kvm_enable_vapic(kvm_context_t kvm
, int vcpu
, uint64_t vapic
)
110 struct kvm_vapic_addr va
= {
114 r
= ioctl(kvm
->vcpu_fd
[vcpu
], KVM_SET_VAPIC_ADDR
, &va
);
117 perror("kvm_enable_vapic");
125 int kvm_arch_run(struct kvm_run
*run
,kvm_context_t kvm
, int vcpu
)
129 switch (run
->exit_reason
) {
130 #ifdef KVM_EXIT_SET_TPR
131 case KVM_EXIT_SET_TPR
:
134 #ifdef KVM_EXIT_TPR_ACCESS
135 case KVM_EXIT_TPR_ACCESS
:
136 r
= handle_tpr_access(kvm
, run
, vcpu
);
#define MAX_ALIAS_SLOTS 4

/* Bookkeeping for the KVM memory-alias slots currently in use.
 * A slot with len == 0 is free. */
static struct {
	uint64_t start;	/* guest-physical start of the alias */
	uint64_t len;	/* alias length in bytes; 0 = unused slot */
} kvm_aliases[MAX_ALIAS_SLOTS];
153 static int get_alias_slot(uint64_t start
)
157 for (i
=0; i
<MAX_ALIAS_SLOTS
; i
++)
158 if (kvm_aliases
[i
].start
== start
)
162 static int get_free_alias_slot(void)
166 for (i
=0; i
<MAX_ALIAS_SLOTS
; i
++)
167 if (kvm_aliases
[i
].len
== 0)
172 static void register_alias(int slot
, uint64_t start
, uint64_t len
)
174 kvm_aliases
[slot
].start
= start
;
175 kvm_aliases
[slot
].len
= len
;
178 int kvm_create_memory_alias(kvm_context_t kvm
,
181 uint64_t target_phys
)
183 struct kvm_memory_alias alias
= {
185 .guest_phys_addr
= phys_start
,
187 .target_phys_addr
= target_phys
,
193 slot
= get_alias_slot(phys_start
);
195 slot
= get_free_alias_slot();
200 r
= ioctl(fd
, KVM_SET_MEMORY_ALIAS
, &alias
);
204 register_alias(slot
, phys_start
, len
);
208 int kvm_destroy_memory_alias(kvm_context_t kvm
, uint64_t phys_start
)
210 return kvm_create_memory_alias(kvm
, phys_start
, 0, 0);
213 #ifdef KVM_CAP_IRQCHIP
215 int kvm_get_lapic(kvm_context_t kvm
, int vcpu
, struct kvm_lapic_state
*s
)
218 if (!kvm
->irqchip_in_kernel
)
220 r
= ioctl(kvm
->vcpu_fd
[vcpu
], KVM_GET_LAPIC
, s
);
223 perror("kvm_get_lapic");
228 int kvm_set_lapic(kvm_context_t kvm
, int vcpu
, struct kvm_lapic_state
*s
)
231 if (!kvm
->irqchip_in_kernel
)
233 r
= ioctl(kvm
->vcpu_fd
[vcpu
], KVM_SET_LAPIC
, s
);
236 perror("kvm_set_lapic");
245 int kvm_get_pit(kvm_context_t kvm
, struct kvm_pit_state
*s
)
248 if (!kvm
->pit_in_kernel
)
250 r
= ioctl(kvm
->vm_fd
, KVM_GET_PIT
, s
);
253 perror("kvm_get_pit");
258 int kvm_set_pit(kvm_context_t kvm
, struct kvm_pit_state
*s
)
261 if (!kvm
->pit_in_kernel
)
263 r
= ioctl(kvm
->vm_fd
, KVM_SET_PIT
, s
);
266 perror("kvm_set_pit");
273 void kvm_show_code(kvm_context_t kvm
, int vcpu
)
275 #define SHOW_CODE_LEN 50
276 int fd
= kvm
->vcpu_fd
[vcpu
];
277 struct kvm_regs regs
;
278 struct kvm_sregs sregs
;
282 char code_str
[SHOW_CODE_LEN
* 3 + 1];
285 r
= ioctl(fd
, KVM_GET_SREGS
, &sregs
);
287 perror("KVM_GET_SREGS");
290 r
= ioctl(fd
, KVM_GET_REGS
, ®s
);
292 perror("KVM_GET_REGS");
295 rip
= sregs
.cs
.base
+ regs
.rip
;
296 back_offset
= regs
.rip
;
297 if (back_offset
> 20)
300 for (n
= -back_offset
; n
< SHOW_CODE_LEN
-back_offset
; ++n
) {
302 strcat(code_str
, " -->");
303 r
= kvm
->callbacks
->mmio_read(kvm
->opaque
, rip
+ n
, &code
, 1);
305 strcat(code_str
, " xx");
308 sprintf(code_str
+ strlen(code_str
), " %02x", code
);
310 fprintf(stderr
, "code:%s\n", code_str
);
315 * Returns available msr list. User must free.
317 struct kvm_msr_list
*kvm_get_msr_list(kvm_context_t kvm
)
319 struct kvm_msr_list sizer
, *msrs
;
323 r
= ioctl(kvm
->fd
, KVM_GET_MSR_INDEX_LIST
, &sizer
);
324 if (r
== -1 && errno
!= E2BIG
)
326 msrs
= malloc(sizeof *msrs
+ sizer
.nmsrs
* sizeof *msrs
->indices
);
331 msrs
->nmsrs
= sizer
.nmsrs
;
332 r
= ioctl(kvm
->fd
, KVM_GET_MSR_INDEX_LIST
, msrs
);
342 int kvm_get_msrs(kvm_context_t kvm
, int vcpu
, struct kvm_msr_entry
*msrs
,
345 struct kvm_msrs
*kmsrs
= malloc(sizeof *kmsrs
+ n
* sizeof *msrs
);
353 memcpy(kmsrs
->entries
, msrs
, n
* sizeof *msrs
);
354 r
= ioctl(kvm
->vcpu_fd
[vcpu
], KVM_GET_MSRS
, kmsrs
);
356 memcpy(msrs
, kmsrs
->entries
, n
* sizeof *msrs
);
362 int kvm_set_msrs(kvm_context_t kvm
, int vcpu
, struct kvm_msr_entry
*msrs
,
365 struct kvm_msrs
*kmsrs
= malloc(sizeof *kmsrs
+ n
* sizeof *msrs
);
373 memcpy(kmsrs
->entries
, msrs
, n
* sizeof *msrs
);
374 r
= ioctl(kvm
->vcpu_fd
[vcpu
], KVM_SET_MSRS
, kmsrs
);
381 static void print_seg(FILE *file
, const char *name
, struct kvm_segment
*seg
)
384 "%s %04x (%08llx/%08x p %d dpl %d db %d s %d type %x l %d"
386 name
, seg
->selector
, seg
->base
, seg
->limit
, seg
->present
,
387 seg
->dpl
, seg
->db
, seg
->s
, seg
->type
, seg
->l
, seg
->g
,
391 static void print_dt(FILE *file
, const char *name
, struct kvm_dtable
*dt
)
393 fprintf(stderr
, "%s %llx/%x\n", name
, dt
->base
, dt
->limit
);
396 void kvm_show_regs(kvm_context_t kvm
, int vcpu
)
398 int fd
= kvm
->vcpu_fd
[vcpu
];
399 struct kvm_regs regs
;
400 struct kvm_sregs sregs
;
403 r
= ioctl(fd
, KVM_GET_REGS
, ®s
);
405 perror("KVM_GET_REGS");
409 "rax %016llx rbx %016llx rcx %016llx rdx %016llx\n"
410 "rsi %016llx rdi %016llx rsp %016llx rbp %016llx\n"
411 "r8 %016llx r9 %016llx r10 %016llx r11 %016llx\n"
412 "r12 %016llx r13 %016llx r14 %016llx r15 %016llx\n"
413 "rip %016llx rflags %08llx\n",
414 regs
.rax
, regs
.rbx
, regs
.rcx
, regs
.rdx
,
415 regs
.rsi
, regs
.rdi
, regs
.rsp
, regs
.rbp
,
416 regs
.r8
, regs
.r9
, regs
.r10
, regs
.r11
,
417 regs
.r12
, regs
.r13
, regs
.r14
, regs
.r15
,
418 regs
.rip
, regs
.rflags
);
419 r
= ioctl(fd
, KVM_GET_SREGS
, &sregs
);
421 perror("KVM_GET_SREGS");
424 print_seg(stderr
, "cs", &sregs
.cs
);
425 print_seg(stderr
, "ds", &sregs
.ds
);
426 print_seg(stderr
, "es", &sregs
.es
);
427 print_seg(stderr
, "ss", &sregs
.ss
);
428 print_seg(stderr
, "fs", &sregs
.fs
);
429 print_seg(stderr
, "gs", &sregs
.gs
);
430 print_seg(stderr
, "tr", &sregs
.tr
);
431 print_seg(stderr
, "ldt", &sregs
.ldt
);
432 print_dt(stderr
, "gdt", &sregs
.gdt
);
433 print_dt(stderr
, "idt", &sregs
.idt
);
434 fprintf(stderr
, "cr0 %llx cr2 %llx cr3 %llx cr4 %llx cr8 %llx"
436 sregs
.cr0
, sregs
.cr2
, sregs
.cr3
, sregs
.cr4
, sregs
.cr8
,
440 uint64_t kvm_get_apic_base(kvm_context_t kvm
, int vcpu
)
442 struct kvm_run
*run
= kvm
->run
[vcpu
];
444 return run
->apic_base
;
447 void kvm_set_cr8(kvm_context_t kvm
, int vcpu
, uint64_t cr8
)
449 struct kvm_run
*run
= kvm
->run
[vcpu
];
454 __u64
kvm_get_cr8(kvm_context_t kvm
, int vcpu
)
456 return kvm
->run
[vcpu
]->cr8
;
459 int kvm_setup_cpuid(kvm_context_t kvm
, int vcpu
, int nent
,
460 struct kvm_cpuid_entry
*entries
)
462 struct kvm_cpuid
*cpuid
;
465 cpuid
= malloc(sizeof(*cpuid
) + nent
* sizeof(*entries
));
470 memcpy(cpuid
->entries
, entries
, nent
* sizeof(*entries
));
471 r
= ioctl(kvm
->vcpu_fd
[vcpu
], KVM_SET_CPUID
, cpuid
);
477 int kvm_setup_cpuid2(kvm_context_t kvm
, int vcpu
, int nent
,
478 struct kvm_cpuid_entry2
*entries
)
480 struct kvm_cpuid2
*cpuid
;
483 cpuid
= malloc(sizeof(*cpuid
) + nent
* sizeof(*entries
));
488 memcpy(cpuid
->entries
, entries
, nent
* sizeof(*entries
));
489 r
= ioctl(kvm
->vcpu_fd
[vcpu
], KVM_SET_CPUID2
, cpuid
);
491 fprintf(stderr
, "kvm_setup_cpuid2: %m\n");
498 int kvm_set_shadow_pages(kvm_context_t kvm
, unsigned int nrshadow_pages
)
500 #ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
503 r
= ioctl(kvm
->fd
, KVM_CHECK_EXTENSION
,
504 KVM_CAP_MMU_SHADOW_CACHE_CONTROL
);
506 r
= ioctl(kvm
->vm_fd
, KVM_SET_NR_MMU_PAGES
, nrshadow_pages
);
508 fprintf(stderr
, "kvm_set_shadow_pages: %m\n");
517 int kvm_get_shadow_pages(kvm_context_t kvm
, unsigned int *nrshadow_pages
)
519 #ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
522 r
= ioctl(kvm
->fd
, KVM_CHECK_EXTENSION
,
523 KVM_CAP_MMU_SHADOW_CACHE_CONTROL
);
525 *nrshadow_pages
= ioctl(kvm
->vm_fd
, KVM_GET_NR_MMU_PAGES
);
534 static int tpr_access_reporting(kvm_context_t kvm
, int vcpu
, int enabled
)
537 struct kvm_tpr_access_ctl tac
= {
541 r
= ioctl(kvm
->fd
, KVM_CHECK_EXTENSION
, KVM_CAP_VAPIC
);
542 if (r
== -1 || r
== 0)
544 r
= ioctl(kvm
->vcpu_fd
[vcpu
], KVM_TPR_ACCESS_REPORTING
, &tac
);
547 perror("KVM_TPR_ACCESS_REPORTING");
553 int kvm_enable_tpr_access_reporting(kvm_context_t kvm
, int vcpu
)
555 return tpr_access_reporting(kvm
, vcpu
, 1);
558 int kvm_disable_tpr_access_reporting(kvm_context_t kvm
, int vcpu
)
560 return tpr_access_reporting(kvm
, vcpu
, 0);