/* Copyright 2008 IBM Corporation
 * Copyright 2011 Intel Corporation
 * Copyright 2016 Veertu, Inc.
 * Copyright 2017 The Android Open Source Project
 *
 * QEMU Hypervisor.framework support
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * This file contain code under public domain from the hvdos project:
 * https://github.com/mist64/hvdos
 *
 * Parts Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

49 #include "qemu/osdep.h"
50 #include "qemu-common.h"
51 #include "qemu/error-report.h"
53 #include "sysemu/hvf.h"
54 #include "sysemu/runstate.h"
59 #include "x86_descr.h"
61 #include "x86_decode.h"
66 #include <Hypervisor/hv.h>
67 #include <Hypervisor/hv_vmx.h>
69 #include "exec/address-spaces.h"
70 #include "hw/i386/apic_internal.h"
71 #include "qemu/main-loop.h"
72 #include "sysemu/accel.h"
73 #include "target/i386/cpu.h"
static void assert_hvf_ok(hv_return_t ret)
{
    if (ret == HV_SUCCESS) {
        return;
    }

    switch (ret) {
    case HV_ERROR:
        error_report("Error: HV_ERROR");
        break;
    case HV_BUSY:
        error_report("Error: HV_BUSY");
        break;
    case HV_BAD_ARGUMENT:
        error_report("Error: HV_BAD_ARGUMENT");
        break;
    case HV_NO_RESOURCES:
        error_report("Error: HV_NO_RESOURCES");
        break;
    case HV_NO_DEVICE:
        error_report("Error: HV_NO_DEVICE");
        break;
    case HV_UNSUPPORTED:
        error_report("Error: HV_UNSUPPORTED");
        break;
    default:
        error_report("Unknown Error");
    }

    abort();
}
hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t size)
{
    hvf_slot *slot;
    int x;

    for (x = 0; x < hvf_state->num_slots; ++x) {
        slot = &hvf_state->slots[x];
        if (slot->size && start < (slot->start + slot->size) &&
            (start + size) > slot->start) {
            return slot;
        }
    }
    return NULL;
}

/* Host-side view of a slot that is currently mapped into the guest. */
struct mac_slot {
    int present;
    uint64_t size;
    uint64_t gpa_start;
};

struct mac_slot mac_slots[32];

static int do_hvf_set_memory(hvf_slot *slot, hv_memory_flags_t flags)
{
    struct mac_slot *macslot;
    hv_return_t ret;

    macslot = &mac_slots[slot->slot_id];

    if (macslot->present) {
        if (macslot->size != slot->size) {
            macslot->present = 0;
            ret = hv_vm_unmap(macslot->gpa_start, macslot->size);
            assert_hvf_ok(ret);
        }
    }

    if (!slot->size) {
        return 0;
    }

    macslot->present = 1;
    macslot->gpa_start = slot->start;
    macslot->size = slot->size;
    ret = hv_vm_map((hv_uvaddr_t)slot->mem, slot->start, slot->size, flags);
    assert_hvf_ok(ret);
    return 0;
}
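
/*
 * Translate a QEMU MemoryRegionSection into a slot and, via
 * do_hvf_set_memory(), into an hv_vm_map()/hv_vm_unmap() of guest-physical
 * address space.
 */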
void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
{
    hvf_slot *mem;
    MemoryRegion *area = section->mr;
    bool writeable = !area->readonly && !area->rom_device;
    hv_memory_flags_t flags;

    if (!memory_region_is_ram(area)) {
        if (writeable) {
            return;
        } else if (!memory_region_is_romd(area)) {
            /*
             * If the memory device is not in romd_mode, then we actually want
             * to remove the hvf memory slot so all accesses will trap.
             */
            add = false;
        }
    }

    mem = hvf_find_overlap_slot(
            section->offset_within_address_space,
            int128_get64(section->size));

    if (mem && add) {
        if (mem->size == int128_get64(section->size) &&
            mem->start == section->offset_within_address_space &&
            mem->mem == (memory_region_get_ram_ptr(area) +
            section->offset_within_region)) {
            return; /* Same region was attempted to register, go away. */
        }
    }

    /* Region needs to be reset. set the size to 0 and remap it. */
    if (mem) {
        mem->size = 0;
        if (do_hvf_set_memory(mem, 0)) {
            error_report("Failed to reset overlapping slot");
            abort();
        }
    }

    if (!add) {
        return;
    }

    if (area->readonly ||
        (!memory_region_is_ram(area) && memory_region_is_romd(area))) {
        flags = HV_MEMORY_READ | HV_MEMORY_EXEC;
    } else {
        flags = HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC;
    }

    /* Now make a new slot. */
    int x;

    for (x = 0; x < hvf_state->num_slots; ++x) {
        mem = &hvf_state->slots[x];
        if (!mem->size) {
            break;
        }
    }

    if (x == hvf_state->num_slots) {
        error_report("No free slots");
        abort();
    }

    mem->size = int128_get64(section->size);
    mem->mem = memory_region_get_ram_ptr(area) + section->offset_within_region;
    mem->start = section->offset_within_address_space;
    mem->region = area;

    if (do_hvf_set_memory(mem, flags)) {
        error_report("Error registering new memory slot");
        abort();
    }
}
void vmx_update_tpr(CPUState *cpu)
{
    /* TODO: need to integrate APIC handling */
    X86CPU *x86_cpu = X86_CPU(cpu);
    int tpr = cpu_get_apic_tpr(x86_cpu->apic_state) << 4;
    int irr = apic_get_highest_priority_irr(x86_cpu->apic_state);

    wreg(cpu->hvf_fd, HV_X86_TPR, tpr);
    if (irr == -1) {
        wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, 0);
    } else {
        wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, (irr > tpr) ? tpr >> 4 :
              irr >> 4);
    }
}

static void update_apic_tpr(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    int tpr = rreg(cpu->hvf_fd, HV_X86_TPR) >> 4;
    cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
}

#define VECTORING_INFO_VECTOR_MASK 0xff

void hvf_handle_io(CPUArchState *env, uint16_t port, void *buffer,
                   int direction, int size, int count)
{
    int i;
    uint8_t *ptr = buffer;

    for (i = 0; i < count; i++) {
        address_space_rw(&address_space_io, port, MEMTXATTRS_UNSPECIFIED,
                         ptr, size, direction);
        ptr += size;
    }
}
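
/*
 * Register synchronization between QEMU's CPUX86State and the HVF vcpu:
 * cpu->vcpu_dirty means QEMU's copy is authoritative and must be written
 * back with hvf_put_registers() before the vcpu runs again.
 */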
static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
    if (!cpu->vcpu_dirty) {
        hvf_get_registers(cpu);
        cpu->vcpu_dirty = true;
    }
}

void hvf_cpu_synchronize_state(CPUState *cpu)
{
    if (!cpu->vcpu_dirty) {
        run_on_cpu(cpu, do_hvf_cpu_synchronize_state, RUN_ON_CPU_NULL);
    }
}

static void do_hvf_cpu_synchronize_post_reset(CPUState *cpu,
                                              run_on_cpu_data arg)
{
    hvf_put_registers(cpu);
    cpu->vcpu_dirty = false;
}

void hvf_cpu_synchronize_post_reset(CPUState *cpu)
{
    run_on_cpu(cpu, do_hvf_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
}

static void do_hvf_cpu_synchronize_post_init(CPUState *cpu,
                                             run_on_cpu_data arg)
{
    hvf_put_registers(cpu);
    cpu->vcpu_dirty = false;
}

void hvf_cpu_synchronize_post_init(CPUState *cpu)
{
    run_on_cpu(cpu, do_hvf_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
}

static void do_hvf_cpu_synchronize_pre_loadvm(CPUState *cpu,
                                              run_on_cpu_data arg)
{
    cpu->vcpu_dirty = true;
}

void hvf_cpu_synchronize_pre_loadvm(CPUState *cpu)
{
    run_on_cpu(cpu, do_hvf_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
}
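
/*
 * Decide how an EPT violation should be handled: a write to a dirty-logged
 * slot only needs the location marked dirty and write access restored, while
 * accesses outside RAM (or non-read accesses to ROM devices) must go to the
 * MMIO instruction emulator.
 */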
static bool ept_emulation_fault(hvf_slot *slot, uint64_t gpa, uint64_t ept_qual)
{
    int read, write;

    /* EPT fault on an instruction fetch doesn't make sense here */
    if (ept_qual & EPT_VIOLATION_INST_FETCH) {
        return false;
    }

    /* EPT fault must be a read fault or a write fault */
    read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
    write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
    if ((read | write) == 0) {
        return false;
    }

    if (write && slot) {
        if (slot->flags & HVF_SLOT_LOG) {
            memory_region_set_dirty(slot->region, gpa - slot->start, 1);
            hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
                          HV_MEMORY_READ | HV_MEMORY_WRITE);
        }
    }

    /*
     * The EPT violation must have been caused by accessing a
     * guest-physical address that is a translation of a guest-linear
     * address.
     */
    if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
        (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
        return false;
    }

    if (!slot) {
        return true;
    }
    if (!memory_region_is_ram(slot->region) &&
        !(read && memory_region_is_romd(slot->region))) {
        return true;
    }
    return false;
}
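
/*
 * Dirty logging is implemented by write-protecting a slot: the first write
 * after hvf_set_dirty_tracking(section, 1) faults, ept_emulation_fault()
 * marks the location dirty and re-enables writes for the slot, and the next
 * log_sync cycle re-protects it.
 */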
static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on)
{
    hvf_slot *slot;

    slot = hvf_find_overlap_slot(
            section->offset_within_address_space,
            int128_get64(section->size));

    if (on) {
        /* protect region against writes; begin tracking it */
        slot->flags |= HVF_SLOT_LOG;
        hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
                      HV_MEMORY_READ);
    } else {
        /* stop tracking region */
        slot->flags &= ~HVF_SLOT_LOG;
        hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
                      HV_MEMORY_READ | HV_MEMORY_WRITE);
    }
}

static void hvf_log_start(MemoryListener *listener,
                          MemoryRegionSection *section, int old, int new)
{
    if (old != 0) {
        return;
    }

    hvf_set_dirty_tracking(section, 1);
}

static void hvf_log_stop(MemoryListener *listener,
                         MemoryRegionSection *section, int old, int new)
{
    if (new != 0) {
        return;
    }

    hvf_set_dirty_tracking(section, 0);
}

static void hvf_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    /*
     * sync of dirty pages is handled elsewhere; just make sure we keep
     * tracking the region.
     */
    hvf_set_dirty_tracking(section, 1);
}

static void hvf_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    hvf_set_phys_mem(section, true);
}

static void hvf_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    hvf_set_phys_mem(section, false);
}

static MemoryListener hvf_memory_listener = {
    .region_add = hvf_region_add,
    .region_del = hvf_region_del,
    .log_start = hvf_log_start,
    .log_stop = hvf_log_stop,
    .log_sync = hvf_log_sync,
};
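
/*
 * Per-vcpu setup and teardown: hvf_init_vcpu() creates the HVF vcpu, reads
 * the VMX capabilities and programs the VMCS controls; hvf_vcpu_destroy()
 * releases the vcpu and its MMIO bounce buffer.
 */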
void hvf_vcpu_destroy(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    hv_return_t ret = hv_vcpu_destroy((hv_vcpuid_t)cpu->hvf_fd);
    g_free(env->hvf_mmio_buf);
    assert_hvf_ok(ret);
}

static void dummy_signal(int sig)
{
}

int hvf_init_vcpu(CPUState *cpu)
{
    X86CPU *x86cpu = X86_CPU(cpu);
    CPUX86State *env = &x86cpu->env;
    int r;

    /* init cpu signals */
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);

    hvf_state->hvf_caps = g_new0(struct hvf_vcpu_caps, 1);
    env->hvf_mmio_buf = g_new(char, 4096);

    r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf_fd, HV_VCPU_DEFAULT);
    cpu->vcpu_dirty = 1;
    assert_hvf_ok(r);

    if (hv_vmx_read_capability(HV_VMX_CAP_PINBASED,
        &hvf_state->hvf_caps->vmx_cap_pinbased)) {
        abort();
    }
    if (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED,
        &hvf_state->hvf_caps->vmx_cap_procbased)) {
        abort();
    }
    if (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2,
        &hvf_state->hvf_caps->vmx_cap_procbased2)) {
        abort();
    }
    if (hv_vmx_read_capability(HV_VMX_CAP_ENTRY,
        &hvf_state->hvf_caps->vmx_cap_entry)) {
        abort();
    }

    /* set VMCS control fields */
    wvmcs(cpu->hvf_fd, VMCS_PIN_BASED_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_pinbased,
          VMCS_PIN_BASED_CTLS_EXTINT |
          VMCS_PIN_BASED_CTLS_NMI |
          VMCS_PIN_BASED_CTLS_VNMI));
    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased,
          VMCS_PRI_PROC_BASED_CTLS_HLT |
          VMCS_PRI_PROC_BASED_CTLS_MWAIT |
          VMCS_PRI_PROC_BASED_CTLS_TSC_OFFSET |
          VMCS_PRI_PROC_BASED_CTLS_TPR_SHADOW) |
          VMCS_PRI_PROC_BASED_CTLS_SEC_CONTROL);
    wvmcs(cpu->hvf_fd, VMCS_SEC_PROC_BASED_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased2,
                   VMCS_PRI_PROC_BASED2_CTLS_APIC_ACCESSES));

    wvmcs(cpu->hvf_fd, VMCS_ENTRY_CTLS, cap2ctrl(hvf_state->hvf_caps->vmx_cap_entry,
          0));
    wvmcs(cpu->hvf_fd, VMCS_EXCEPTION_BITMAP, 0); /* Double fault */

    wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, 0);

    x86cpu = X86_CPU(cpu);
    x86cpu->env.xsave_buf = qemu_memalign(4096, 4096);

    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_STAR, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_LSTAR, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_CSTAR, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_FMASK, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_FSBASE, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_GSBASE, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_KERNELGSBASE, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_TSC_AUX, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_TSC, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_CS, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_EIP, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_ESP, 1);

    return 0;
}
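
/*
 * Capture pending event state from the VMCS after a VM exit: IDT-vectoring
 * information (an interrupted event injection), its error code if any, and
 * the NMI/STI/MOV-SS interruptibility bits, mirrored into CPUX86State.
 */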
static void hvf_store_events(CPUState *cpu, uint32_t ins_len, uint64_t idtvec_info)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    env->exception_nr = -1;
    env->exception_pending = 0;
    env->exception_injected = 0;
    env->interrupt_injected = -1;
    env->nmi_injected = false;
    env->ins_len = 0;
    env->has_error_code = false;
    if (idtvec_info & VMCS_IDT_VEC_VALID) {
        switch (idtvec_info & VMCS_IDT_VEC_TYPE) {
        case VMCS_IDT_VEC_HWINTR:
        case VMCS_IDT_VEC_SWINTR:
            env->interrupt_injected = idtvec_info & VMCS_IDT_VEC_VECNUM;
            break;
        case VMCS_IDT_VEC_NMI:
            env->nmi_injected = true;
            break;
        case VMCS_IDT_VEC_HWEXCEPTION:
        case VMCS_IDT_VEC_SWEXCEPTION:
            env->exception_nr = idtvec_info & VMCS_IDT_VEC_VECNUM;
            env->exception_injected = 1;
            break;
        case VMCS_IDT_VEC_PRIV_SWEXCEPTION:
        default:
            abort();
        }
        if ((idtvec_info & VMCS_IDT_VEC_TYPE) == VMCS_IDT_VEC_SWEXCEPTION ||
            (idtvec_info & VMCS_IDT_VEC_TYPE) == VMCS_IDT_VEC_SWINTR) {
            env->ins_len = ins_len;
        }
        if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
            env->has_error_code = true;
            env->error_code = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_ERROR);
        }
    }
    if ((rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY) &
        VMCS_INTERRUPTIBILITY_NMI_BLOCKING)) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }
    if (rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY) &
         (VMCS_INTERRUPTIBILITY_STI_BLOCKING |
         VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
        env->hflags |= HF_INHIBIT_IRQ_MASK;
    } else {
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    }
}
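
/*
 * Main vcpu run loop: flush dirty register state, inject pending events,
 * enter the guest with hv_vcpu_run() and dispatch on the VMCS exit reason.
 * The loop exits once a handler produces an EXCP_* code for the caller.
 */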
int hvf_vcpu_exec(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    int ret = 0;
    uint64_t rip = 0;

    if (hvf_process_events(cpu)) {
        return EXCP_HLT;
    }

    do {
        if (cpu->vcpu_dirty) {
            hvf_put_registers(cpu);
            cpu->vcpu_dirty = false;
        }

        if (hvf_inject_interrupts(cpu)) {
            return EXCP_INTERRUPT;
        }
        vmx_update_tpr(cpu);

        qemu_mutex_unlock_iothread();
        if (!cpu_is_bsp(X86_CPU(cpu)) && cpu->halted) {
            qemu_mutex_lock_iothread();
            return EXCP_HLT;
        }

        hv_return_t r = hv_vcpu_run(cpu->hvf_fd);
        assert_hvf_ok(r);

        /* handle VMEXIT */
        uint64_t exit_reason = rvmcs(cpu->hvf_fd, VMCS_EXIT_REASON);
        uint64_t exit_qual = rvmcs(cpu->hvf_fd, VMCS_EXIT_QUALIFICATION);
        uint32_t ins_len = (uint32_t)rvmcs(cpu->hvf_fd,
                                           VMCS_EXIT_INSTRUCTION_LENGTH);
        uint64_t idtvec_info = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO);

        hvf_store_events(cpu, ins_len, idtvec_info);
        rip = rreg(cpu->hvf_fd, HV_X86_RIP);
        env->eflags = rreg(cpu->hvf_fd, HV_X86_RFLAGS);

        qemu_mutex_lock_iothread();

        update_apic_tpr(cpu);

        ret = 0;
        switch (exit_reason) {
        case EXIT_REASON_HLT: {
            macvm_set_rip(cpu, rip + ins_len);
            if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
                (env->eflags & IF_MASK))
                && !(cpu->interrupt_request & CPU_INTERRUPT_NMI) &&
                !(idtvec_info & VMCS_IDT_VEC_VALID)) {
                cpu->halted = 1;
                ret = EXCP_HLT;
                break;
            }
            ret = EXCP_INTERRUPT;
            break;
        }
        case EXIT_REASON_MWAIT: {
            ret = EXCP_INTERRUPT;
            break;
        }
        /* Need to check if MMIO or unmapped fault */
        case EXIT_REASON_EPT_FAULT:
        {
            hvf_slot *slot;
            uint64_t gpa = rvmcs(cpu->hvf_fd, VMCS_GUEST_PHYSICAL_ADDRESS);

            if (((idtvec_info & VMCS_IDT_VEC_VALID) == 0) &&
                ((exit_qual & EXIT_QUAL_NMIUDTI) != 0)) {
                vmx_set_nmi_blocking(cpu);
            }

            slot = hvf_find_overlap_slot(gpa, 1);
            /* mmio */
            if (ept_emulation_fault(slot, gpa, exit_qual)) {
                struct x86_decode decode;

                load_regs(cpu);
                decode_instruction(env, &decode);
                exec_instruction(env, &decode);
                store_regs(cpu);
                break;
            }
            break;
        }
        case EXIT_REASON_INOUT:
        {
            uint32_t in = (exit_qual & 8) != 0;
            uint32_t size = (exit_qual & 7) + 1;
            uint32_t string = (exit_qual & 16) != 0;
            uint32_t port = exit_qual >> 16;
            /*uint32_t rep = (exit_qual & 0x20) != 0;*/

            if (!string && in) {
                uint64_t val = 0;
                load_regs(cpu);
                hvf_handle_io(env, port, &val, 0, size, 1);
                if (size == 1) {
                    AL(env) = val;
                } else if (size == 2) {
                    AX(env) = val;
                } else if (size == 4) {
                    RAX(env) = (uint32_t)val;
                } else {
                    RAX(env) = (uint64_t)val;
                }
                env->eip += ins_len;
                store_regs(cpu);
                break;
            } else if (!string && !in) {
                RAX(env) = rreg(cpu->hvf_fd, HV_X86_RAX);
                hvf_handle_io(env, port, &RAX(env), 1, size, 1);
                macvm_set_rip(cpu, rip + ins_len);
                break;
            }
            struct x86_decode decode;

            load_regs(cpu);
            decode_instruction(env, &decode);
            assert(ins_len == decode.len);
            exec_instruction(env, &decode);
            store_regs(cpu);
            break;
        }
        case EXIT_REASON_CPUID: {
            uint32_t rax = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX);
            uint32_t rbx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RBX);
            uint32_t rcx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX);
            uint32_t rdx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX);

            cpu_x86_cpuid(env, rax, rcx, &rax, &rbx, &rcx, &rdx);

            wreg(cpu->hvf_fd, HV_X86_RAX, rax);
            wreg(cpu->hvf_fd, HV_X86_RBX, rbx);
            wreg(cpu->hvf_fd, HV_X86_RCX, rcx);
            wreg(cpu->hvf_fd, HV_X86_RDX, rdx);

            macvm_set_rip(cpu, rip + ins_len);
            break;
        }
        case EXIT_REASON_XSETBV: {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUX86State *env = &x86_cpu->env;
            uint32_t eax = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX);
            uint32_t ecx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX);
            uint32_t edx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX);

            if (ecx) {
                macvm_set_rip(cpu, rip + ins_len);
                break;
            }
            env->xcr0 = ((uint64_t)edx << 32) | eax;
            wreg(cpu->hvf_fd, HV_X86_XCR0, env->xcr0 | 1);
            macvm_set_rip(cpu, rip + ins_len);
            break;
        }
        case EXIT_REASON_INTR_WINDOW:
            vmx_clear_int_window_exiting(cpu);
            ret = EXCP_INTERRUPT;
            break;
        case EXIT_REASON_NMI_WINDOW:
            vmx_clear_nmi_window_exiting(cpu);
            ret = EXCP_INTERRUPT;
            break;
        case EXIT_REASON_EXT_INTR:
            /* force exit and allow io handling */
            ret = EXCP_INTERRUPT;
            break;
        case EXIT_REASON_RDMSR:
        case EXIT_REASON_WRMSR:
        {
            load_regs(cpu);
            if (exit_reason == EXIT_REASON_RDMSR) {
                simulate_rdmsr(cpu);
            } else {
                simulate_wrmsr(cpu);
            }
            env->eip += ins_len;
            store_regs(cpu);
            break;
        }
        case EXIT_REASON_CR_ACCESS: {
            int cr;
            int reg;

            load_regs(cpu);
            cr = exit_qual & 15;
            reg = (exit_qual >> 8) & 15;

            switch (cr) {
            case 0x0: {
                macvm_set_cr0(cpu->hvf_fd, RRX(env, reg));
                break;
            }
            case 4: {
                macvm_set_cr4(cpu->hvf_fd, RRX(env, reg));
                break;
            }
            case 8: {
                X86CPU *x86_cpu = X86_CPU(cpu);
                if (exit_qual & 0x10) {
                    RRX(env, reg) = cpu_get_apic_tpr(x86_cpu->apic_state);
                } else {
                    int tpr = RRX(env, reg);
                    cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
                    ret = EXCP_INTERRUPT;
                }
                break;
            }
            default:
                error_report("Unrecognized CR %d", cr);
                abort();
            }
            env->eip += ins_len;
            store_regs(cpu);
            break;
        }
        case EXIT_REASON_APIC_ACCESS: { /* TODO */
            struct x86_decode decode;

            load_regs(cpu);
            decode_instruction(env, &decode);
            exec_instruction(env, &decode);
            store_regs(cpu);
            break;
        }
        case EXIT_REASON_TPR: {
            ret = 1;
            break;
        }
        case EXIT_REASON_TASK_SWITCH: {
            uint64_t vinfo = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO);
            x68_segment_selector sel = {.sel = exit_qual & 0xffff};
            vmx_handle_task_switch(cpu, sel, (exit_qual >> 30) & 0x3,
                                   vinfo & VMCS_INTR_VALID,
                                   vinfo & VECTORING_INFO_VECTOR_MASK,
                                   vinfo & VMCS_INTR_T_MASK);
            break;
        }
        case EXIT_REASON_TRIPLE_FAULT: {
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            ret = EXCP_INTERRUPT;
            break;
        }
        case EXIT_REASON_RDPMC:
            wreg(cpu->hvf_fd, HV_X86_RAX, 0);
            wreg(cpu->hvf_fd, HV_X86_RDX, 0);
            macvm_set_rip(cpu, rip + ins_len);
            break;
        case VMX_REASON_VMCALL:
            env->exception_nr = EXCP0D_GPF;
            env->exception_injected = 1;
            env->has_error_code = true;
            env->error_code = 0;
            break;
        default:
            error_report("%llx: unhandled exit %llx", rip, exit_reason);
        }
    } while (ret == 0);

    return ret;
}
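
/*
 * Accelerator bring-up: create the Hypervisor.framework VM, initialise the
 * memory slot table, and register the memory listener and the HVF vcpu
 * hooks with the core cpus layer.
 */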
static int hvf_accel_init(MachineState *ms)
{
    int x;
    hv_return_t ret;
    HVFState *s;

    ret = hv_vm_create(HV_VM_DEFAULT);
    assert_hvf_ok(ret);

    s = g_new0(HVFState, 1);

    s->num_slots = 32;
    for (x = 0; x < s->num_slots; ++x) {
        s->slots[x].size = 0;
        s->slots[x].slot_id = x;
    }

    hvf_state = s;
    memory_listener_register(&hvf_memory_listener, &address_space_memory);
    cpus_register_accel(&hvf_cpus);
    return 0;
}

static void hvf_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);
    ac->init_machine = hvf_accel_init;
    ac->allowed = &hvf_allowed;
}

static const TypeInfo hvf_accel_type = {
    .name = TYPE_HVF_ACCEL,
    .parent = TYPE_ACCEL,
    .class_init = hvf_accel_class_init,
};

static void hvf_type_init(void)
{
    type_register_static(&hvf_accel_type);
}

type_init(hvf_type_init);