/*
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Google Inc,
 * Based on Veertu vddh/vmm/vmx.h
 *
 * Interfaces to Hypervisor.framework to read/write X86 registers and VMCS.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef VMX_H
#define VMX_H

#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>

/* local definitions of the VMCS_*, CR0_*, CR4_*, MSR_EFER_* constants
 * and the CPUState/X86CPU types used below */
#include "vmcs.h"
#include "cpu.h"
#include "x86.h"
#include "exec/address-spaces.h"

/* read GPR */
static inline uint64_t rreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg)
{
    uint64_t v;

    if (hv_vcpu_read_register(vcpu, reg, &v)) {
        abort();
    }

    return v;
}

/* write GPR */
static inline void wreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg, uint64_t v)
{
    if (hv_vcpu_write_register(vcpu, reg, v)) {
        abort();
    }
}

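/*
 * A minimal usage sketch of the accessors above; illustrative only, and
 * instruction_len is a hypothetical value taken from exit decoding:
 *
 *     uint64_t rip = rreg(vcpu, HV_X86_RIP);
 *     wreg(vcpu, HV_X86_RIP, rip + instruction_len);
 */
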
/* read VMCS field */
static inline uint64_t rvmcs(hv_vcpuid_t vcpu, uint32_t field)
{
    uint64_t v;

    hv_vmx_vcpu_read_vmcs(vcpu, field, &v);

    return v;
}

/* write VMCS field */
static inline void wvmcs(hv_vcpuid_t vcpu, uint32_t field, uint64_t v)
{
    hv_vmx_vcpu_write_vmcs(vcpu, field, v);
}

/* desired control word constrained by hardware/hypervisor capabilities */
static inline uint64_t cap2ctrl(uint64_t cap, uint64_t ctrl)
{
    return (ctrl | (cap & 0xffffffff)) & (cap >> 32);
}

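/*
 * The 64-bit capability value follows the VMX capability-MSR layout: the
 * low 32 bits hold the allowed 0-settings (a 1 there means the control bit
 * is mandatory) and the high 32 bits the allowed 1-settings (a 0 there
 * means the control bit is forbidden). ORing with the low word forces the
 * mandatory bits on; ANDing with the high word strips the forbidden bits.
 * A sketch of the intended use, assuming Hypervisor.framework's
 * hv_vmx_read_capability() and a caller-chosen desired_ctls value:
 *
 *     uint64_t cap;
 *     if (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED, &cap) == HV_SUCCESS) {
 *         wvmcs(vcpu, VMCS_PRI_PROC_BASED_CTLS, cap2ctrl(cap, desired_ctls));
 *     }
 */
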
#define VM_ENTRY_GUEST_LMA (1LL << 9)

#define AR_TYPE_ACCESSES_MASK 1
#define AR_TYPE_READABLE_MASK (1 << 1)
#define AR_TYPE_WRITEABLE_MASK (1 << 2)
#define AR_TYPE_CODE_MASK (1 << 3)
#define AR_TYPE_MASK 0x0f
#define AR_TYPE_BUSY_64_TSS 11
#define AR_TYPE_BUSY_32_TSS 11
#define AR_TYPE_BUSY_16_TSS 3

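/*
 * The AR_TYPE_* values above mirror the 4-bit "type" field of a system
 * segment descriptor's access rights: 11 encodes a busy 32/64-bit TSS and
 * 3 a busy 16-bit TSS, which is why enter_long_mode() below forces TR's
 * type to AR_TYPE_BUSY_64_TSS before a 64-bit VM entry.
 */
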
static void enter_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer)
{
    uint64_t entry_ctls;

    efer |= MSR_EFER_LMA;
    wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer);
    entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);
    wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls | VM_ENTRY_GUEST_LMA);

    /*
     * VM entry into long mode requires TR to be a busy 64-bit TSS;
     * patch up the type field if the guest left a busy 32-bit TSS there.
     */
    uint64_t guest_tr_ar = rvmcs(vcpu, VMCS_GUEST_TR_ACCESS_RIGHTS);
    if ((efer & MSR_EFER_LME) &&
        (guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
        wvmcs(vcpu, VMCS_GUEST_TR_ACCESS_RIGHTS,
              (guest_tr_ar & ~AR_TYPE_MASK) | AR_TYPE_BUSY_64_TSS);
    }
}

static void exit_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer)
{
    uint64_t entry_ctls;

    entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);
    wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls & ~VM_ENTRY_GUEST_LMA);

    efer &= ~MSR_EFER_LMA;
    wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer);
}

static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)
{
    int i;
    uint64_t pdpte[4] = {0, 0, 0, 0};
    uint64_t efer = rvmcs(vcpu, VMCS_GUEST_IA32_EFER);
    uint64_t old_cr0 = rvmcs(vcpu, VMCS_GUEST_CR0);

    /*
     * When the guest enables PAE paging outside long mode, fetch the four
     * page-directory-pointer-table entries from the 32-byte aligned table
     * that CR3 points at, so they can be mirrored into the VMCS.
     */
    if ((cr0 & CR0_PG) && (rvmcs(vcpu, VMCS_GUEST_CR4) & CR4_PAE) &&
        !(efer & MSR_EFER_LME)) {
        address_space_rw(&address_space_memory,
                         rvmcs(vcpu, VMCS_GUEST_CR3) & ~0x1f,
                         MEMTXATTRS_UNSPECIFIED,
                         (uint8_t *)pdpte, 32, 0);
    }

    /* the PDPTE fields are 64-bit VMCS fields at consecutive even encodings */
    for (i = 0; i < 4; i++) {
        wvmcs(vcpu, VMCS_GUEST_PDPTE0 + i * 2, pdpte[i]);
    }

    wvmcs(vcpu, VMCS_CR0_MASK, CR0_CD | CR0_NE | CR0_PG);
    wvmcs(vcpu, VMCS_CR0_SHADOW, cr0);

    wvmcs(vcpu, VMCS_GUEST_CR0, cr0 | CR0_NE | CR0_ET);

    if (efer & MSR_EFER_LME) {
        if (!(old_cr0 & CR0_PG) && (cr0 & CR0_PG)) {
            enter_long_mode(vcpu, cr0, efer);
        }
        if (/*(old_cr0 & CR0_PG) &&*/ !(cr0 & CR0_PG)) {
            exit_long_mode(vcpu, cr0, efer);
        }
    }

    hv_vcpu_invalidate_tlb(vcpu);
    hv_vcpu_flush(vcpu);
}

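/*
 * A hedged sketch of the intended call path (handler and variable names
 * are hypothetical): a MOV-to-CR0 VM exit decodes the new value from the
 * exit qualification and routes it through macvm_set_cr0() so that the
 * CR0 mask, read shadow, PDPTEs and long-mode transitions stay consistent:
 *
 *     uint64_t qual = rvmcs(vcpu, VMCS_EXIT_QUALIFICATION);
 *     uint64_t new_cr0 = ...;   (value of the register encoded in qual)
 *     macvm_set_cr0(vcpu, new_cr0);
 */
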
static inline void macvm_set_cr4(hv_vcpuid_t vcpu, uint64_t cr4)
{
    /*
     * CR4.VMXE must stay set while the vCPU is in VMX operation; the
     * CR4 read shadow hides it from the guest.
     */
    uint64_t guest_cr4 = cr4 | CR4_VMXE;

    wvmcs(vcpu, VMCS_GUEST_CR4, guest_cr4);
    wvmcs(vcpu, VMCS_CR4_SHADOW, cr4);

    hv_vcpu_invalidate_tlb(vcpu);
    hv_vcpu_flush(vcpu);
}

static inline void macvm_set_rip(CPUState *cpu, uint64_t rip)
{
    uint64_t val;

    /* BUG: should take overlap into account */
    wreg(cpu->hvf_fd, HV_X86_RIP, rip);

    /* after moving forward in rip, we need to clean up INTERRUPTIBILITY */
    val = rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
    if (val & (VMCS_INTERRUPTIBILITY_STI_BLOCKING |
               VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
        wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY,
              val & ~(VMCS_INTERRUPTIBILITY_STI_BLOCKING |
                      VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING));
    }
}

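/*
 * Background for the clearing above: STI and MOV-SS blocking describe a
 * one-instruction interrupt shadow. Once emulation has advanced RIP past
 * the shadowed instruction the shadow is stale, so it is cleared here to
 * avoid spuriously holding off interrupts.
 */
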
static inline void vmx_clear_nmi_blocking(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    /* keep QEMU's cached NMI-mask state in sync with the VMCS */
    env->hflags2 &= ~HF2_NMI_MASK;
    uint32_t gi = (uint32_t)rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
    gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
    wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static inline void vmx_set_nmi_blocking(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    env->hflags2 |= HF2_NMI_MASK;
    uint32_t gi = (uint32_t)rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
    gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
    wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static inline void vmx_set_nmi_window_exiting(CPUState *cpu)
{
    uint64_t val;

    val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val |
          VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);
}

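/*
 * A hedged usage sketch (nmi_pending is hypothetical caller state): when
 * an NMI should be injected while the guest still blocks NMIs, request an
 * NMI-window exit and perform the injection once the window opens:
 *
 *     uint32_t gi = (uint32_t)rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
 *     if (nmi_pending && (gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING)) {
 *         vmx_set_nmi_window_exiting(cpu);
 *     }
 */
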
static inline void vmx_clear_nmi_window_exiting(CPUState *cpu)
{
    uint64_t val;

    val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val &
          ~VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);
}

#endif /* VMX_H */