i386: hvf: abort on decoding error
qemu/kevin.git: target/i386/hvf/hvf.c

/* Copyright 2008 IBM Corporation
 *           2008 Red Hat, Inc.
 * Copyright 2011 Intel Corporation
 * Copyright 2016 Veertu, Inc.
 * Copyright 2017 The Android Open Source Project
 *
 * QEMU Hypervisor.framework support
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/error-report.h"

#include "sysemu/hvf.h"
#include "hvf-i386.h"
#include "vmcs.h"
#include "vmx.h"
#include "x86.h"
#include "x86_descr.h"
#include "x86_mmu.h"
#include "x86_decode.h"
#include "x86_emu.h"
#include "x86_task.h"
#include "x86hvf.h"

#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>

#include "exec/address-spaces.h"
#include "exec/exec-all.h"
#include "exec/ioport.h"
#include "hw/i386/apic_internal.h"
#include "hw/boards.h"
#include "qemu/main-loop.h"
#include "strings.h"
#include "sysemu/accel.h"
#include "sysemu/sysemu.h"
#include "target/i386/cpu.h"

pthread_rwlock_t mem_lock = PTHREAD_RWLOCK_INITIALIZER;
HVFState *hvf_state;
int hvf_disabled = 1;
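
/*
 * Map a Hypervisor.framework status code to a readable name and bail out.
 * Callers treat any status other than HV_SUCCESS as fatal.
 */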
static void assert_hvf_ok(hv_return_t ret)
{
    if (ret == HV_SUCCESS) {
        return;
    }

    switch (ret) {
    case HV_ERROR:
        error_report("Error: HV_ERROR");
        break;
    case HV_BUSY:
        error_report("Error: HV_BUSY");
        break;
    case HV_BAD_ARGUMENT:
        error_report("Error: HV_BAD_ARGUMENT");
        break;
    case HV_NO_RESOURCES:
        error_report("Error: HV_NO_RESOURCES");
        break;
    case HV_NO_DEVICE:
        error_report("Error: HV_NO_DEVICE");
        break;
    case HV_UNSUPPORTED:
        error_report("Error: HV_UNSUPPORTED");
        break;
    default:
        error_report("Unknown Error");
    }

    abort();
}

/* Memory slots */
hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t end)
{
    hvf_slot *slot;
    int x;
    for (x = 0; x < hvf_state->num_slots; ++x) {
        slot = &hvf_state->slots[x];
        if (slot->size && start < (slot->start + slot->size) &&
            end > slot->start) {
            return slot;
        }
    }
    return NULL;
}
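
/*
 * mac_slots[] shadows the hvf_slot table, one entry per slot_id, recording
 * the guest-physical range currently mapped through hv_vm_map() so that a
 * stale mapping can be torn down before it is replaced.
 */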
struct mac_slot {
    int present;
    uint64_t size;
    uint64_t gpa_start;
    uint64_t gva;
};

struct mac_slot mac_slots[32];
#define ALIGN(x, y)  (((x) + (y) - 1) & ~((y) - 1))
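
/*
 * Push one slot's state down to Hypervisor.framework: unmap a previous
 * mapping whose size changed, then (for a non-empty slot) map slot->mem at
 * slot->start with read/write/execute permissions.
 */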
static int do_hvf_set_memory(hvf_slot *slot)
{
    struct mac_slot *macslot;
    hv_memory_flags_t flags;
    hv_return_t ret;

    macslot = &mac_slots[slot->slot_id];

    if (macslot->present) {
        if (macslot->size != slot->size) {
            macslot->present = 0;
            ret = hv_vm_unmap(macslot->gpa_start, macslot->size);
            assert_hvf_ok(ret);
        }
    }

    if (!slot->size) {
        return 0;
    }

    flags = HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC;

    macslot->present = 1;
    macslot->gpa_start = slot->start;
    macslot->size = slot->size;
    ret = hv_vm_map((hv_uvaddr_t)slot->mem, slot->start, slot->size, flags);
    assert_hvf_ok(ret);
    return 0;
}
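
/*
 * Add or remove the RAM backing a MemoryRegionSection.  Any existing
 * overlapping slot is cleared first; on add, the first free slot is reused
 * for the new range.
 */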
void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
{
    hvf_slot *mem;
    MemoryRegion *area = section->mr;

    if (!memory_region_is_ram(area)) {
        return;
    }

    mem = hvf_find_overlap_slot(
            section->offset_within_address_space,
            section->offset_within_address_space + int128_get64(section->size));

    if (mem && add) {
        if (mem->size == int128_get64(section->size) &&
            mem->start == section->offset_within_address_space &&
            mem->mem == (memory_region_get_ram_ptr(area) +
            section->offset_within_region)) {
            return; /* Same region was attempted to register, go away. */
        }
    }

    /* Region needs to be reset: set the size to 0 and remap it. */
    if (mem) {
        mem->size = 0;
        if (do_hvf_set_memory(mem)) {
            error_report("Failed to reset overlapping slot");
            abort();
        }
    }

    if (!add) {
        return;
    }

    /* Now make a new slot. */
    int x;

    for (x = 0; x < hvf_state->num_slots; ++x) {
        mem = &hvf_state->slots[x];
        if (!mem->size) {
            break;
        }
    }

    if (x == hvf_state->num_slots) {
        error_report("No free slots");
        abort();
    }

    mem->size = int128_get64(section->size);
    mem->mem = memory_region_get_ram_ptr(area) + section->offset_within_region;
    mem->start = section->offset_within_address_space;
    mem->region = area;

    if (do_hvf_set_memory(mem)) {
        error_report("Error registering new memory slot");
        abort();
    }
}
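
/*
 * Keep the hardware TPR and the VMCS TPR threshold in sync with the
 * emulated APIC: the threshold is derived from the priority class of the
 * highest-priority pending interrupt (IRR), if any.
 */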
void vmx_update_tpr(CPUState *cpu)
{
    /* TODO: need integrate APIC handling */
    X86CPU *x86_cpu = X86_CPU(cpu);
    int tpr = cpu_get_apic_tpr(x86_cpu->apic_state) << 4;
    int irr = apic_get_highest_priority_irr(x86_cpu->apic_state);

    wreg(cpu->hvf_fd, HV_X86_TPR, tpr);
    if (irr == -1) {
        wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, 0);
    } else {
        wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, (irr > tpr) ? tpr >> 4 :
              irr >> 4);
    }
}

void update_apic_tpr(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    int tpr = rreg(cpu->hvf_fd, HV_X86_TPR) >> 4;
    cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
}

#define VECTORING_INFO_VECTOR_MASK 0xff

static void hvf_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}

void hvf_handle_io(CPUArchState *env, uint16_t port, void *buffer,
                   int direction, int size, int count)
{
    int i;
    uint8_t *ptr = buffer;

    for (i = 0; i < count; i++) {
        address_space_rw(&address_space_io, port, MEMTXATTRS_UNSPECIFIED,
                         ptr, size,
                         direction);
        ptr += size;
    }
}

/* TODO: synchronize vcpu state */
static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
    CPUState *cpu_state = cpu;
    if (cpu_state->vcpu_dirty == 0) {
        hvf_get_registers(cpu_state);
    }

    cpu_state->vcpu_dirty = 1;
}

void hvf_cpu_synchronize_state(CPUState *cpu_state)
{
    if (cpu_state->vcpu_dirty == 0) {
        run_on_cpu(cpu_state, do_hvf_cpu_synchronize_state, RUN_ON_CPU_NULL);
    }
}

static void do_hvf_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
{
    CPUState *cpu_state = cpu;
    hvf_put_registers(cpu_state);
    cpu_state->vcpu_dirty = false;
}

void hvf_cpu_synchronize_post_reset(CPUState *cpu_state)
{
    run_on_cpu(cpu_state, do_hvf_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
}

void _hvf_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
{
    CPUState *cpu_state = cpu;
    hvf_put_registers(cpu_state);
    cpu_state->vcpu_dirty = false;
}

void hvf_cpu_synchronize_post_init(CPUState *cpu_state)
{
    run_on_cpu(cpu_state, _hvf_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
}
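
/*
 * Decide whether an EPT violation must be handled by the instruction
 * emulator (MMIO) rather than by the slot/dirty-logging machinery: returns
 * true only for data accesses with a valid guest-linear translation that do
 * not hit any RAM slot.  As a side effect, a write to a dirty-logged slot is
 * recorded and the slot is made writable again.
 */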
static bool ept_emulation_fault(hvf_slot *slot, addr_t gpa, uint64_t ept_qual)
{
    int read, write;

    /* EPT fault on an instruction fetch doesn't make sense here */
    if (ept_qual & EPT_VIOLATION_INST_FETCH) {
        return false;
    }

    /* EPT fault must be a read fault or a write fault */
    read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
    write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
    if ((read | write) == 0) {
        return false;
    }

    if (write && slot) {
        if (slot->flags & HVF_SLOT_LOG) {
            memory_region_set_dirty(slot->region, gpa - slot->start, 1);
            hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
                          HV_MEMORY_READ | HV_MEMORY_WRITE);
        }
    }

    /*
     * The EPT violation must have been caused by accessing a
     * guest-physical address that is a translation of a guest-linear
     * address.
     */
    if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
        (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
        return false;
    }

    return !slot;
}
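
/*
 * Start or stop dirty logging for the slot backing a section by toggling
 * write permission on its guest-physical range; subsequent writes fault
 * into ept_emulation_fault(), which records the dirty page.
 */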
static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on)
{
    hvf_slot *slot;

    slot = hvf_find_overlap_slot(
            section->offset_within_address_space,
            section->offset_within_address_space + int128_get64(section->size));

    /* protect region against writes; begin tracking it */
    if (on) {
        slot->flags |= HVF_SLOT_LOG;
        hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
                      HV_MEMORY_READ);
    /* stop tracking region */
    } else {
        slot->flags &= ~HVF_SLOT_LOG;
        hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
                      HV_MEMORY_READ | HV_MEMORY_WRITE);
    }
}

static void hvf_log_start(MemoryListener *listener,
                          MemoryRegionSection *section, int old, int new)
{
    if (old != 0) {
        return;
    }

    hvf_set_dirty_tracking(section, 1);
}

static void hvf_log_stop(MemoryListener *listener,
                         MemoryRegionSection *section, int old, int new)
{
    if (new != 0) {
        return;
    }

    hvf_set_dirty_tracking(section, 0);
}

static void hvf_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    /*
     * sync of dirty pages is handled elsewhere; just make sure we keep
     * tracking the region.
     */
    hvf_set_dirty_tracking(section, 1);
}

static void hvf_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    hvf_set_phys_mem(section, true);
}

static void hvf_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    hvf_set_phys_mem(section, false);
}

static MemoryListener hvf_memory_listener = {
    .priority = 10,
    .region_add = hvf_region_add,
    .region_del = hvf_region_del,
    .log_start = hvf_log_start,
    .log_stop = hvf_log_stop,
    .log_sync = hvf_log_sync,
};
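
/*
 * Put the vCPU into the architectural power-on state: real mode with CS at
 * base 0xffff0000 and RIP 0xfff0, CR4.VMXE set (and shadowed away from the
 * guest via the CR4 mask), RDX holding the reset CPU signature, and the
 * remaining general-purpose registers cleared.
 */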
void hvf_reset_vcpu(CPUState *cpu)
{
    /* TODO: this shouldn't be needed; there is already a call to
     * cpu_synchronize_all_post_reset in vl.c
     */
    wvmcs(cpu->hvf_fd, VMCS_ENTRY_CTLS, 0);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER, 0);
    macvm_set_cr0(cpu->hvf_fd, 0x60000010);

    wvmcs(cpu->hvf_fd, VMCS_CR4_MASK, CR4_VMXE_MASK);
    wvmcs(cpu->hvf_fd, VMCS_CR4_SHADOW, 0x0);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_CR4, CR4_VMXE_MASK);

    /* set VMCS guest state fields */
    wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_SELECTOR, 0xf000);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_LIMIT, 0xffff);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_ACCESS_RIGHTS, 0x9b);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_BASE, 0xffff0000);

    wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_SELECTOR, 0);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_LIMIT, 0xffff);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_ACCESS_RIGHTS, 0x93);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_BASE, 0);

    wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_SELECTOR, 0);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_LIMIT, 0xffff);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_ACCESS_RIGHTS, 0x93);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_BASE, 0);

    wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_SELECTOR, 0);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_LIMIT, 0xffff);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_ACCESS_RIGHTS, 0x93);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE, 0);

    wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_SELECTOR, 0);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_LIMIT, 0xffff);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_ACCESS_RIGHTS, 0x93);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE, 0);

    wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_SELECTOR, 0);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_LIMIT, 0xffff);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_ACCESS_RIGHTS, 0x93);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_BASE, 0);

    wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_SELECTOR, 0);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT, 0);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_ACCESS_RIGHTS, 0x10000);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE, 0);

    wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_SELECTOR, 0);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_LIMIT, 0);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_ACCESS_RIGHTS, 0x83);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_BASE, 0);

    wvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT, 0);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE, 0);

    wvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_LIMIT, 0);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_BASE, 0);

    /*wvmcs(cpu->hvf_fd, VMCS_GUEST_CR2, 0x0);*/
    wvmcs(cpu->hvf_fd, VMCS_GUEST_CR3, 0x0);

    wreg(cpu->hvf_fd, HV_X86_RIP, 0xfff0);
    wreg(cpu->hvf_fd, HV_X86_RDX, 0x623);
    wreg(cpu->hvf_fd, HV_X86_RFLAGS, 0x2);
    wreg(cpu->hvf_fd, HV_X86_RSP, 0x0);
    wreg(cpu->hvf_fd, HV_X86_RAX, 0x0);
    wreg(cpu->hvf_fd, HV_X86_RBX, 0x0);
    wreg(cpu->hvf_fd, HV_X86_RCX, 0x0);
    wreg(cpu->hvf_fd, HV_X86_RSI, 0x0);
    wreg(cpu->hvf_fd, HV_X86_RDI, 0x0);
    wreg(cpu->hvf_fd, HV_X86_RBP, 0x0);

    for (int i = 0; i < 8; i++) {
        wreg(cpu->hvf_fd, HV_X86_R8 + i, 0x0);
    }

    hv_vm_sync_tsc(0);
    cpu->halted = 0;
    hv_vcpu_invalidate_tlb(cpu->hvf_fd);
    hv_vcpu_flush(cpu->hvf_fd);
}

void hvf_vcpu_destroy(CPUState *cpu)
{
    hv_return_t ret = hv_vcpu_destroy((hv_vcpuid_t)cpu->hvf_fd);
    assert_hvf_ok(ret);
}

static void dummy_signal(int sig)
{
}
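
/*
 * Create and configure one Hypervisor.framework vCPU: set up the IPI signal
 * handler, read the VMX capability MSRs, program the VMCS control fields,
 * reset the vCPU and switch the syscall/sysenter MSRs to pass-through
 * ("native") handling.
 */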
int hvf_init_vcpu(CPUState *cpu)
{
    X86CPU *x86cpu = X86_CPU(cpu);
    CPUX86State *env = &x86cpu->env;
    int r;

    /* init cpu signals */
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);

    init_emu();
    init_decoder();

    hvf_state->hvf_caps = g_new0(struct hvf_vcpu_caps, 1);
    env->hvf_emul = g_new0(HVFX86EmulatorState, 1);

    r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf_fd, HV_VCPU_DEFAULT);
    cpu->vcpu_dirty = 1;
    assert_hvf_ok(r);

    if (hv_vmx_read_capability(HV_VMX_CAP_PINBASED,
        &hvf_state->hvf_caps->vmx_cap_pinbased)) {
        abort();
    }
    if (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED,
        &hvf_state->hvf_caps->vmx_cap_procbased)) {
        abort();
    }
    if (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2,
        &hvf_state->hvf_caps->vmx_cap_procbased2)) {
        abort();
    }
    if (hv_vmx_read_capability(HV_VMX_CAP_ENTRY,
        &hvf_state->hvf_caps->vmx_cap_entry)) {
        abort();
    }

    /* set VMCS control fields */
    wvmcs(cpu->hvf_fd, VMCS_PIN_BASED_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_pinbased,
                   VMCS_PIN_BASED_CTLS_EXTINT |
                   VMCS_PIN_BASED_CTLS_NMI |
                   VMCS_PIN_BASED_CTLS_VNMI));
    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased,
                   VMCS_PRI_PROC_BASED_CTLS_HLT |
                   VMCS_PRI_PROC_BASED_CTLS_MWAIT |
                   VMCS_PRI_PROC_BASED_CTLS_TSC_OFFSET |
                   VMCS_PRI_PROC_BASED_CTLS_TPR_SHADOW) |
          VMCS_PRI_PROC_BASED_CTLS_SEC_CONTROL);
    wvmcs(cpu->hvf_fd, VMCS_SEC_PROC_BASED_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased2,
                   VMCS_PRI_PROC_BASED2_CTLS_APIC_ACCESSES));

    wvmcs(cpu->hvf_fd, VMCS_ENTRY_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_entry, 0));
    wvmcs(cpu->hvf_fd, VMCS_EXCEPTION_BITMAP, 0); /* Double fault */

    wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, 0);

    hvf_reset_vcpu(cpu);

    x86cpu = X86_CPU(cpu);
    x86cpu->env.kvm_xsave_buf = qemu_memalign(4096, 4096);

    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_STAR, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_LSTAR, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_CSTAR, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_FMASK, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_FSBASE, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_GSBASE, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_KERNELGSBASE, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_TSC_AUX, 1);
    /*hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_TSC, 1);*/
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_CS, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_EIP, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_ESP, 1);

    return 0;
}

void hvf_disable(int shouldDisable)
{
    hvf_disabled = shouldDisable;
}
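
/*
 * Record pending exception/interrupt/NMI state from the IDT-vectoring info
 * of the last exit, plus interruptibility state (NMI and STI/MOV SS
 * blocking), so the interrupted event can be replayed on the next VM entry.
 */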
static void hvf_store_events(CPUState *cpu, uint32_t ins_len, uint64_t idtvec_info)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    env->exception_injected = -1;
    env->interrupt_injected = -1;
    env->nmi_injected = false;
    if (idtvec_info & VMCS_IDT_VEC_VALID) {
        switch (idtvec_info & VMCS_IDT_VEC_TYPE) {
        case VMCS_IDT_VEC_HWINTR:
        case VMCS_IDT_VEC_SWINTR:
            env->interrupt_injected = idtvec_info & VMCS_IDT_VEC_VECNUM;
            break;
        case VMCS_IDT_VEC_NMI:
            env->nmi_injected = true;
            break;
        case VMCS_IDT_VEC_HWEXCEPTION:
        case VMCS_IDT_VEC_SWEXCEPTION:
            env->exception_injected = idtvec_info & VMCS_IDT_VEC_VECNUM;
            break;
        case VMCS_IDT_VEC_PRIV_SWEXCEPTION:
        default:
            abort();
        }
        if ((idtvec_info & VMCS_IDT_VEC_TYPE) == VMCS_IDT_VEC_SWEXCEPTION ||
            (idtvec_info & VMCS_IDT_VEC_TYPE) == VMCS_IDT_VEC_SWINTR) {
            env->ins_len = ins_len;
        }
        if (idtvec_info & VMCS_INTR_DEL_ERRCODE) {
            env->has_error_code = true;
            env->error_code = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_ERROR);
        }
    }
    if ((rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY) &
        VMCS_INTERRUPTIBILITY_NMI_BLOCKING)) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }
    if (rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY) &
        (VMCS_INTERRUPTIBILITY_STI_BLOCKING |
         VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
        env->hflags |= HF_INHIBIT_IRQ_MASK;
    } else {
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    }
}
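
/*
 * Main vCPU run loop: flush dirty register state, inject pending events,
 * enter the guest with hv_vcpu_run() and dispatch on the VM-exit reason.
 * The loop keeps running until an exit needs service from the main loop
 * (EXCP_INTERRUPT) or the vCPU halts (EXCP_HLT).
 */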
int hvf_vcpu_exec(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    int ret = 0;
    uint64_t rip = 0;

    cpu->halted = 0;

    if (hvf_process_events(cpu)) {
        return EXCP_HLT;
    }

    do {
        if (cpu->vcpu_dirty) {
            hvf_put_registers(cpu);
            cpu->vcpu_dirty = false;
        }

        if (hvf_inject_interrupts(cpu)) {
            return EXCP_INTERRUPT;
        }
        vmx_update_tpr(cpu);

        qemu_mutex_unlock_iothread();
        if (!cpu_is_bsp(X86_CPU(cpu)) && cpu->halted) {
            qemu_mutex_lock_iothread();
            return EXCP_HLT;
        }

        hv_return_t r = hv_vcpu_run(cpu->hvf_fd);
        assert_hvf_ok(r);

        /* handle VMEXIT */
        uint64_t exit_reason = rvmcs(cpu->hvf_fd, VMCS_EXIT_REASON);
        uint64_t exit_qual = rvmcs(cpu->hvf_fd, VMCS_EXIT_QUALIFICATION);
        uint32_t ins_len = (uint32_t)rvmcs(cpu->hvf_fd,
                                           VMCS_EXIT_INSTRUCTION_LENGTH);

        uint64_t idtvec_info = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO);

        hvf_store_events(cpu, ins_len, idtvec_info);
        rip = rreg(cpu->hvf_fd, HV_X86_RIP);
        RFLAGS(env) = rreg(cpu->hvf_fd, HV_X86_RFLAGS);
        env->eflags = RFLAGS(env);

        qemu_mutex_lock_iothread();

        update_apic_tpr(cpu);
        current_cpu = cpu;

        ret = 0;
        switch (exit_reason) {
        case EXIT_REASON_HLT: {
            macvm_set_rip(cpu, rip + ins_len);
            if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
                (EFLAGS(env) & IF_MASK))
                && !(cpu->interrupt_request & CPU_INTERRUPT_NMI) &&
                !(idtvec_info & VMCS_IDT_VEC_VALID)) {
                cpu->halted = 1;
                ret = EXCP_HLT;
                break;
            }
            ret = EXCP_INTERRUPT;
            break;
        }
        case EXIT_REASON_MWAIT: {
            ret = EXCP_INTERRUPT;
            break;
        }
        /* Need to check if MMIO or unmapped fault */
        case EXIT_REASON_EPT_FAULT:
        {
            hvf_slot *slot;
            addr_t gpa = rvmcs(cpu->hvf_fd, VMCS_GUEST_PHYSICAL_ADDRESS);

            if (((idtvec_info & VMCS_IDT_VEC_VALID) == 0) &&
                ((exit_qual & EXIT_QUAL_NMIUDTI) != 0)) {
                vmx_set_nmi_blocking(cpu);
            }

            slot = hvf_find_overlap_slot(gpa, gpa);
            /* mmio */
            if (ept_emulation_fault(slot, gpa, exit_qual)) {
                struct x86_decode decode;

                load_regs(cpu);
                env->hvf_emul->fetch_rip = rip;

                decode_instruction(env, &decode);
                exec_instruction(env, &decode);
                store_regs(cpu);
                break;
            }
            break;
        }
        case EXIT_REASON_INOUT:
        {
            uint32_t in = (exit_qual & 8) != 0;
            uint32_t size = (exit_qual & 7) + 1;
            uint32_t string = (exit_qual & 16) != 0;
            uint32_t port = exit_qual >> 16;
            /*uint32_t rep = (exit_qual & 0x20) != 0;*/

            if (!string && in) {
                uint64_t val = 0;
                load_regs(cpu);
                hvf_handle_io(env, port, &val, 0, size, 1);
                if (size == 1) {
                    AL(env) = val;
                } else if (size == 2) {
                    AX(env) = val;
                } else if (size == 4) {
                    RAX(env) = (uint32_t)val;
                } else {
                    VM_PANIC("size");
                }
                RIP(env) += ins_len;
                store_regs(cpu);
                break;
            } else if (!string && !in) {
                RAX(env) = rreg(cpu->hvf_fd, HV_X86_RAX);
                hvf_handle_io(env, port, &RAX(env), 1, size, 1);
                macvm_set_rip(cpu, rip + ins_len);
                break;
            }
            struct x86_decode decode;

            load_regs(cpu);
            env->hvf_emul->fetch_rip = rip;

            decode_instruction(env, &decode);
            assert(ins_len == decode.len);
            exec_instruction(env, &decode);
            store_regs(cpu);

            break;
        }
        case EXIT_REASON_CPUID: {
            uint32_t rax = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX);
            uint32_t rbx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RBX);
            uint32_t rcx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX);
            uint32_t rdx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX);

            cpu_x86_cpuid(env, rax, rcx, &rax, &rbx, &rcx, &rdx);

            wreg(cpu->hvf_fd, HV_X86_RAX, rax);
            wreg(cpu->hvf_fd, HV_X86_RBX, rbx);
            wreg(cpu->hvf_fd, HV_X86_RCX, rcx);
            wreg(cpu->hvf_fd, HV_X86_RDX, rdx);

            macvm_set_rip(cpu, rip + ins_len);
            break;
        }
        case EXIT_REASON_XSETBV: {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUX86State *env = &x86_cpu->env;
            uint32_t eax = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX);
            uint32_t ecx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX);
            uint32_t edx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX);

            if (ecx) {
                macvm_set_rip(cpu, rip + ins_len);
                break;
            }
            env->xcr0 = ((uint64_t)edx << 32) | eax;
            wreg(cpu->hvf_fd, HV_X86_XCR0, env->xcr0 | 1);
            macvm_set_rip(cpu, rip + ins_len);
            break;
        }
        case EXIT_REASON_INTR_WINDOW:
            vmx_clear_int_window_exiting(cpu);
            ret = EXCP_INTERRUPT;
            break;
        case EXIT_REASON_NMI_WINDOW:
            vmx_clear_nmi_window_exiting(cpu);
            ret = EXCP_INTERRUPT;
            break;
        case EXIT_REASON_EXT_INTR:
            /* force exit and allow io handling */
            ret = EXCP_INTERRUPT;
            break;
        case EXIT_REASON_RDMSR:
        case EXIT_REASON_WRMSR:
        {
            load_regs(cpu);
            if (exit_reason == EXIT_REASON_RDMSR) {
                simulate_rdmsr(cpu);
            } else {
                simulate_wrmsr(cpu);
            }
            RIP(env) += rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);
            store_regs(cpu);
            break;
        }
        case EXIT_REASON_CR_ACCESS: {
            int cr;
            int reg;

            load_regs(cpu);
            cr = exit_qual & 15;
            reg = (exit_qual >> 8) & 15;

            switch (cr) {
            case 0x0: {
                macvm_set_cr0(cpu->hvf_fd, RRX(env, reg));
                break;
            }
            case 4: {
                macvm_set_cr4(cpu->hvf_fd, RRX(env, reg));
                break;
            }
            case 8: {
                X86CPU *x86_cpu = X86_CPU(cpu);
                if (exit_qual & 0x10) {
                    RRX(env, reg) = cpu_get_apic_tpr(x86_cpu->apic_state);
                } else {
                    int tpr = RRX(env, reg);
                    cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
                    ret = EXCP_INTERRUPT;
                }
                break;
            }
            default:
                error_report("Unrecognized CR %d", cr);
                abort();
            }
            RIP(env) += ins_len;
            store_regs(cpu);
            break;
        }
        case EXIT_REASON_APIC_ACCESS: { /* TODO */
            struct x86_decode decode;

            load_regs(cpu);
            env->hvf_emul->fetch_rip = rip;

            decode_instruction(env, &decode);
            exec_instruction(env, &decode);
            store_regs(cpu);
            break;
        }
        case EXIT_REASON_TPR: {
            ret = 1;
            break;
        }
        case EXIT_REASON_TASK_SWITCH: {
            uint64_t vinfo = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO);
            x68_segment_selector sel = {.sel = exit_qual & 0xffff};
            vmx_handle_task_switch(cpu, sel, (exit_qual >> 30) & 0x3,
                                   vinfo & VMCS_INTR_VALID,
                                   vinfo & VECTORING_INFO_VECTOR_MASK,
                                   vinfo & VMCS_INTR_T_MASK);
            break;
        }
        case EXIT_REASON_TRIPLE_FAULT: {
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            ret = EXCP_INTERRUPT;
            break;
        }
        case EXIT_REASON_RDPMC:
            wreg(cpu->hvf_fd, HV_X86_RAX, 0);
            wreg(cpu->hvf_fd, HV_X86_RDX, 0);
            macvm_set_rip(cpu, rip + ins_len);
            break;
        case VMX_REASON_VMCALL:
            env->exception_injected = EXCP0D_GPF;
            env->has_error_code = true;
            env->error_code = 0;
            break;
        default:
            error_report("%llx: unhandled exit %llx", rip, exit_reason);
        }
    } while (ret == 0);

    return ret;
}

static bool hvf_allowed;
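
/*
 * Accelerator bring-up: create the VM, initialize the memory slot table,
 * install the HVF interrupt handler and register the memory listener that
 * keeps the slots in sync with the guest memory map.
 */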
static int hvf_accel_init(MachineState *ms)
{
    int x;
    hv_return_t ret;
    HVFState *s;

    hvf_disable(0);
    ret = hv_vm_create(HV_VM_DEFAULT);
    assert_hvf_ok(ret);

    s = g_new0(HVFState, 1);

    s->num_slots = 32;
    for (x = 0; x < s->num_slots; ++x) {
        s->slots[x].size = 0;
        s->slots[x].slot_id = x;
    }

    hvf_state = s;
    cpu_interrupt_handler = hvf_handle_interrupt;
    memory_listener_register(&hvf_memory_listener, &address_space_memory);
    return 0;
}

static void hvf_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);
    ac->name = "HVF";
    ac->init_machine = hvf_accel_init;
    ac->allowed = &hvf_allowed;
}

static const TypeInfo hvf_accel_type = {
    .name = TYPE_HVF_ACCEL,
    .parent = TYPE_ACCEL,
    .class_init = hvf_accel_class_init,
};

static void hvf_type_init(void)
{
    type_register_static(&hvf_accel_type);
}

type_init(hvf_type_init);